#import packages
import pandas as pd
import scipy as sp, numpy as np
from sklearn import preprocessing, model_selection
from sklearn.preprocessing import scale, StandardScaler, normalize, Normalizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import ShuffleSplit, LeaveOneGroupOut, LeaveOneOut, train_test_split, learning_curve, GridSearchCV, cross_val_score, cross_val_predict
import matplotlib.pyplot as plt
import scipy.io as io
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
import warnings
#error handling
warnings.filterwarnings('ignore')
#define functions
def display_scores(scores):
    """Record the mean and std of a cross-validation score array.

    Writes ``scores.mean()`` / ``scores.std()`` into the module-level
    result arrays ``i_mean`` and ``i_std`` at slot ``i - 1``.

    NOTE(review): this relies on the module-level loop counter ``i`` and
    the pre-allocated global arrays ``i_mean`` / ``i_std`` being set by
    the calling loop (see the CV-RMSE section below) — fragile coupling,
    kept as-is for backward compatibility.

    Parameters
    ----------
    scores : numpy.ndarray
        Per-fold cross-validation scores (here: RMSE values).

    Returns
    -------
    tuple
        The (mutated) ``i_mean`` and ``i_std`` arrays.
    """
    i_mean[i-1] = scores.mean()
    i_std[i-1] = scores.std()
    return i_mean, i_std
def moving_average(a, n):
    """Simple n-point running mean of *a*.

    Returns an array of length ``len(a) - n + 1`` whose element k is the
    mean of ``a[k:k + n]`` (computed via a cumulative sum).
    """
    csum = np.cumsum(a, dtype=float)
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n
def abline(slope, intercept):
    """Draw a dashed line y = slope * x + intercept across the current axes."""
    x_span = np.array(plt.gca().get_xlim())
    plt.plot(x_span, intercept + slope * x_span, '--')
def vip(model):
    """Variable Importance in Projection (VIP) scores of a fitted PLS model.

    Reads ``x_scores_``, ``x_weights_`` and ``y_loadings_`` from *model*
    and returns a 1-D array with one VIP score per input feature.
    """
    scores = model.x_scores_
    weights = model.x_weights_
    loadings = model.y_loadings_
    n_features, n_comp = weights.shape
    # Variance of y captured by each latent component.
    explained = np.diag(scores.T @ scores @ loadings.T @ loadings).reshape(n_comp, -1)
    explained_total = np.sum(explained)
    result = np.zeros((n_features,))
    for feat in range(n_features):
        # Squared, column-normalised weight of this feature in every component.
        contrib = np.array([(weights[feat, comp] / np.linalg.norm(weights[:, comp])) ** 2
                            for comp in range(n_comp)])
        result[feat] = np.sqrt(n_features * (explained.T @ contrib) / explained_total)
    return result
# Learning Curves
#cross validation
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
                        n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate 3 plots: the test and training learning curve, the training
    samples vs fit times curve, the fit times vs score curve.
    Parameters
    ----------
    estimator : estimator instance
        An estimator instance implementing `fit` and `predict` methods which
        will be cloned for each validation.
    title : str
        Title for the chart.
    X : array-like of shape (n_samples, n_features)
        Training vector, where ``n_samples`` is the number of samples and
        ``n_features`` is the number of features.
    y : array-like of shape (n_samples) or (n_samples, n_features)
        Target relative to ``X`` for classification or regression;
        None for unsupervised learning.
    axes : array-like of shape (3,), default=None
        Axes to use for plotting the curves.
    ylim : tuple of shape (2,), default=None
        Defines minimum and maximum y-values plotted, e.g. (ymin, ymax).
    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    n_jobs : int or None, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    train_sizes : array-like of shape (n_ticks,)
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the ``dtype`` is float, it is regarded
        as a fraction of the maximum size of the training set (that is
        determined by the selected validation method), i.e. it has to be within
        (0, 1]. Otherwise it is interpreted as absolute sizes of the training
        sets. Note that for classification the number of samples usually have
        to be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    """
    # Create a fresh 1x3 row of axes when the caller did not supply one.
    if axes is None:
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
    axes[0].set_title(title)
    if ylim is not None:
        axes[0].set_ylim(*ylim)
    axes[0].set_xlabel("Training examples")
    axes[0].set_ylabel("Score")
    # return_times=True makes learning_curve also report per-size fit times.
    train_sizes, train_scores, test_scores, fit_times, _ = \
        learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
                       train_sizes=train_sizes,
                       return_times=True)
    # Aggregate across CV folds (axis=1) for plotting mean +/- std bands.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    fit_times_mean = np.mean(fit_times, axis=1)
    fit_times_std = np.std(fit_times, axis=1)
    # Plot learning curve
    axes[0].grid()
    axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
                         train_scores_mean + train_scores_std, alpha=0.1,
                         color="r")
    axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
                         test_scores_mean + test_scores_std, alpha=0.1,
                         color="g")
    axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
                 label="Training score")
    axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
                 label="Cross-validation score")
    axes[0].legend(loc="best")
    # Plot n_samples vs fit_times
    axes[1].grid()
    axes[1].plot(train_sizes, fit_times_mean, 'o-')
    axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
                         fit_times_mean + fit_times_std, alpha=0.1)
    axes[1].set_xlabel("Training examples")
    axes[1].set_ylabel("fit_times")
    axes[1].set_title("Scalability of the model")
    # Plot fit_time vs score
    axes[2].grid()
    axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
    axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
                         test_scores_mean + test_scores_std, alpha=0.1)
    axes[2].set_xlabel("fit_times")
    axes[2].set_ylabel("Score")
    axes[2].set_title("Performance of the model")
    return plt
class Sample:
    # Class-wide registry: every constructed Sample is appended here.
    instances = []
    """Sample representing in vivo voltammograms"""

    def __init__(self, myName=0, myData=0, myTime=0, myStimType=0, myStimLength=0,
                 myStimFreq=0, myPosition=0, myBehavior=0, myModel=0,
                 myNumComp=0, myPreProcess=0, myHyperParams=0, mySampleName=0,
                 myDrug=0, myAlignedData=0, myBrainRegion=0,
                 my5HTbasal=0, my5HTphasic=0, myDAbasal=0, myDAphasic=0):
        # Identity / raw data
        self._Name, self._Data, self._Time = myName, myData, myTime
        # Stimulation parameters
        self._StimType, self._StimLength = myStimType, myStimLength
        self._StimFreq, self._Position = myStimFreq, myPosition
        # Experiment / modelling context
        self._Behavior, self._Model = myBehavior, myModel
        self._NumComp, self._PreProcess = myNumComp, myPreProcess
        self._HyperParams, self._SampleName = myHyperParams, mySampleName
        self._Drug, self._AlignedData = myDrug, myAlignedData
        self._BrainRegion = myBrainRegion
        # Neurotransmitter measurements
        self._5HT_Basal, self._5HT_Phasic = my5HTbasal, my5HTphasic
        self._DA_Basal, self._DA_Phasic = myDAbasal, myDAphasic
        # Register this instance in the class-wide list.
        Sample.instances.append(self)

    def set_StimFreq(self, stimFreq):
        """Update the stimulation frequency."""
        self._StimFreq = stimFreq

    def set_StimLength(self, stimLength):
        """Update the stimulation length."""
        self._StimLength = stimLength

    def set_Position(self, position):
        """Update the recording position."""
        self._Position = position

    def set_Drug(self, drug):
        """Update the drug condition."""
        self._Drug = drug
#insert file path below
file_location=r'C:\Users\csmov\fscv_fpv_HYdata\in vivo Data FSCV_FPV_2020\in vivo Data FSCV_FPV_2020\Master_Combined\S1459_S1447_S1504_Combined.xlsx'
sheetName = 'NBS NA PulseCut' #BS or NBS, NA or 25A, PulseCut or TriCut
#Hyperparameters
nComponents = 2  # number of latent components used by the final PLSR/PCR model
#Preprocessing
preProcess = 'Normalize' #'No Scale', 'Scale Features', 'Scale Samples', or 'Normalize'
myNorm = 'max' #'max', 'l2' or 'l1'
#Cross-validation
cvFolds = 18  # number of CV folds (here equal to the sample count -> leave-one-out)
#Model
modelChoice = 'PCR' #'PLSR', 'PCR'
#import data
df_RPV = pd.read_excel(file_location, sheet_name=sheetName) #dataframe for voltammogram response
df_RPV_T = df_RPV.T #transpose to get in correct format for sklearn
df_ConcAll = pd.read_excel(file_location, sheet_name = 'Conc') #dataframe for concentrations of df_RPV responses
df_ConcDA = df_ConcAll[["DA"]] # contains only dopamine (DA) concentrations
df_ConcHT = df_ConcAll[["HT"]] # contains only serotonin (5HT) concentrations
df_Samples = pd.DataFrame((list(df_RPV.columns)),columns=['Sample']) #extracts sample code
# numpy arrays for sklearn: X = samples x features, y = concentration targets
X = df_RPV_T.to_numpy();
X_raw = df_RPV_T.to_numpy();  # untouched copy for pipelines that preprocess internally
y = df_ConcAll.to_numpy();
y_DA = df_ConcAll['DA'].to_numpy();
y_5HT = df_ConcAll['HT'].to_numpy();
list_Samples = df_Samples.values.tolist();
# overlay of the raw voltammograms, one trace per sample
plt.plot(X.T);
plt.xlabel("Sample");
plt.ylabel("Current (nA)");
plt.title("Overlaid Voltammograms");
#preprocess
# Fit the preprocessor selected by `preProcess`; each branch relabels the plot.
if preProcess == 'Scale Features':
    preProcesser = preprocessing.StandardScaler().fit(X)
    plt.ylabel("Scaled Current (nA)");
    plt.title("Scaled Feature Voltammograms");
if preProcess == 'Normalize':
    preProcesser = preprocessing.Normalizer(norm=myNorm).fit(X)
    plt.ylabel("Normalized Current (nA)");
    plt.title("Normalized Voltammograms");
if preProcess == 'No Scale':
    # identity scaler: with_mean/with_std disabled leaves X unchanged
    preProcesser = preprocessing.StandardScaler(with_mean=False,with_std=False).fit(X)
    plt.ylabel("Scaled Current (nA)");
    plt.title("Non-scaled Voltammograms");
if preProcess == 'Scale Samples':
    # NOTE(review): fitted on X.T but applied to X below — verify intent
    preProcesser = preprocessing.StandardScaler().fit(X.T)
    plt.ylabel("Scaled Current (nA)");
    plt.title("Scaled Sample Voltammograms");
X = preProcesser.transform(X)  # X holds the preprocessed spectra from here on
plt.plot(X.T);
plt.xlabel("Sample");
#PCA of Y
# Project the concentration columns (DA, HT) onto 2 principal components
# and label every sample in PC space.
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
pca = PCA(n_components = 2)
principalComponents = pca.fit_transform(df_ConcAll)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
finalDf = pd.concat([principalDf, df_Samples], axis = 1)
# plot and label the data in PC space
# FIX: DataFrame.get_value() was removed in pandas 1.0 — use scalar accessor .at
for i in range(len(df_Samples)):
    plt.scatter(finalDf.at[i, 'principal component 1'], finalDf.at[i, 'principal component 2'])
    plt.annotate(finalDf.at[i, "Sample"],
                 (finalDf.at[i, 'principal component 1'], finalDf.at[i, 'principal component 2']))
# axes through the origin for reference
plt.plot([-5,0,5],[0,0,0],c='k')
plt.plot([0,0,0],[-3,0,3],c='k')
plt.title("PCs in 2D")
plt.xlabel("PC1")
plt.ylabel("PC2")
print("PC1 % Explained Variance:", round(pca.explained_variance_ratio_[0]*100,2), "\nPC2 % Explained Variance:", round(pca.explained_variance_ratio_[1]*100,2))
PC1 % Explained Variance: 60.72 PC2 % Explained Variance: 39.28
#PCA of X
# Same 2-component PCA scatter as above, but on the (preprocessed) spectra X.
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
pca = PCA(n_components = 2)
principalComponents = pca.fit_transform(X)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
finalDf = pd.concat([principalDf, df_Samples], axis = 1)
# plot and label the data in PC space
# FIX: DataFrame.get_value() was removed in pandas 1.0 — use scalar accessor .at
for i in range(len(df_Samples)):
    plt.scatter(finalDf.at[i, 'principal component 1'], finalDf.at[i, 'principal component 2'])
    plt.annotate(finalDf.at[i, "Sample"],
                 (finalDf.at[i, 'principal component 1'], finalDf.at[i, 'principal component 2']))
plt.title("PCs in 2D")
plt.xlabel("PC1")
plt.ylabel("PC2")
print("PC1 % Explained Variance:",round(pca.explained_variance_ratio_[0]*100,2), "\nPC2 % Explained Variance:", round(pca.explained_variance_ratio_[1]*100,2))
# How many components are needed to explain 95% of the variance in X?
pca = PCA(n_components = 0.95)
pca.fit(X)
cumsum = np.cumsum(pca.explained_variance_ratio_)
print("Num Components for 95% Variance Explained:", pca.n_components_)
# NOTE(review): cumsum[1] is the CUMULATIVE variance of PC1+PC2, although the
# label says "PC2 % Explained Variance" — confirm which was intended.
print("PC1 % Explained Variance:", np.round(cumsum,3)[0],"\nPC2 % Explained Variance:", np.round(cumsum,3)[1])
PC1 % Explained Variance: 78.85 PC2 % Explained Variance: 14.13 Num Components for 95% Variance Explained: 3 PC1 % Explained Variance: 0.789 PC2 % Explained Variance: 0.93
# Training-set variance explained (R2) as a function of component count.
var_exp_DA = np.zeros(len(df_RPV_T))
var_exp_HT = np.zeros(len(df_RPV_T))
var_exp_All = np.zeros(len(df_RPV_T))
i_values = np.arange(1,len(df_RPV_T)+1)  # candidate numbers of components: 1..n_samples
for i in i_values:
    if modelChoice == 'PLSR':
        myModel = make_pipeline(preProcesser, PLSRegression(n_components=i,scale=False))
        myModel.fit_transform(X, y)
    if modelChoice == 'PCR':
        myModel = make_pipeline(preProcesser, PCA(n_components=i), LinearRegression())
        myModel.fit(X, y)
    # R2 on the training data itself (no cross-validation here)
    y_pred = myModel.predict(X)
    var_exp_DA[i-1] = r2_score(y,y_pred,multioutput='raw_values')[0]
    var_exp_HT[i-1] = r2_score(y,y_pred,multioutput='raw_values')[1]
    var_exp_All[i-1] = r2_score(y,y_pred)
df_var_exp_All=pd.DataFrame({"Number Components": i_values, "Variance Explained (%), All": np.round(100*var_exp_All,2)})
print(df_var_exp_All,'\n');
df_var_exp_DA=pd.DataFrame({"Number Components": i_values, "Variance Explained (%), DA": np.round(100*var_exp_DA,2)})
print(df_var_exp_DA,'\n');
df_var_exp_HT=pd.DataFrame({"Number Components": i_values, "Variance Explained (%), HT": np.round(100*var_exp_HT,2)})
print(df_var_exp_HT);
plt.plot(i_values,var_exp_All,label='All');
plt.plot(i_values,var_exp_HT,label="HT");
plt.plot(i_values,var_exp_DA,label='DA');
plt.legend()
plt.title("Variance Explained by N Components");
plt.xlabel("Number Components");
plt.ylabel("Variance Explained");
Number Components Variance Explained (%), All
0 1 0.92
1 2 35.63
2 3 42.08
3 4 79.59
4 5 87.56
5 6 90.06
6 7 90.64
7 8 94.53
8 9 96.03
9 10 96.06
10 11 96.82
11 12 97.38
12 13 97.52
13 14 99.00
14 15 99.38
15 16 99.50
16 17 100.00
17 18 100.00
Number Components Variance Explained (%), DA
0 1 0.02
1 2 68.89
2 3 76.00
3 4 83.63
4 5 85.16
5 6 86.80
6 7 86.80
7 8 93.80
8 9 95.40
9 10 95.43
10 11 96.21
11 12 96.68
12 13 96.85
13 14 99.38
14 15 99.42
15 16 99.67
16 17 100.00
17 18 100.00
Number Components Variance Explained (%), HT
0 1 1.82
1 2 2.37
2 3 8.17
3 4 75.55
4 5 89.96
5 6 93.32
6 7 94.48
7 8 95.27
8 9 96.67
9 10 96.69
10 11 97.42
11 12 98.08
12 13 98.18
13 14 98.62
14 15 99.34
15 16 99.34
16 17 100.00
17 18 100.00
# Cross-validated RMSE vs number of components. display_scores() fills
# i_mean / i_std using the loop variable i as a global index.
i_mean = np.zeros(len(df_RPV_T)-1)
i_std =np.zeros(len(df_RPV_T)-1)
for i in i_values[:-1]:  # last component count skipped (arrays hold n-1 slots)
    if modelChoice == 'PLSR':
        myModel = make_pipeline(preProcesser, PLSRegression(n_components=i,scale=False))
    if modelChoice == 'PCR':
        myModel = make_pipeline(preProcesser, PCA(n_components=i), LinearRegression())
    scores = cross_val_score(myModel, X, y, scoring="neg_mean_squared_error", cv=cvFolds)
    model_rmse_scores = np.sqrt(-scores)  # negate sklearn's neg-MSE, then root -> RMSE
    display_scores(model_rmse_scores)
plt.show()
plt.plot(i_values[:-1],i_mean)
plt.xlabel("Number Components");
plt.ylabel("Mean Scores (RMSE)");
plt.show()
plt.plot(i_values[:-1],i_std)
plt.xlabel("Number Components");
plt.ylabel("Std Dev");
# VIP (Variable Importance in Projection) analysis — only defined for PLSR,
# so everything below is skipped when modelChoice is 'PCR'.
if modelChoice == 'PLSR':
    PLSRmodel=PLSRegression(n_components=nComponents,scale=False) #TODO
    PLSRmodel.fit_transform(X,y)
    vipPLSR = vip(PLSRmodel)
    plt.plot(vip(PLSRmodel))
    plt.xlabel("Feature")
    plt.ylabel('VIP Score')
    plt.title('VIP Scores')
    plt.show()
    plt.plot(df_RPV)
    plt.xlabel("Feature")
    plt.ylabel('Current (nA)')
    plt.title('Voltammogram')
    plt.show()
    # count features whose VIP score reaches the threshold of 1
    countOver1 = 0
    for i in range(len(vipPLSR)):
        if vipPLSR[i] >= 1:
            countOver1 += 1
    print('Number Features with VIP > or = 1:',countOver1)
    # smooth the VIP trace with a 10-point moving average; reference line y=1
    plt.plot(moving_average(vipPLSR,n=10))
    plt.xlabel("Feature")
    plt.ylabel('Moving Average VIP Score')
    plt.title('Moving Average of VIP Scores')
    abline(0,1)
#TODO: Set grid search params for PLSR and PCR
# Grid of hyperparameters matching the preprocessing/model combination.
# FIX: n_components must be >= 1 for PCA and PLSRegression; the original
# range(len(df_RPV_T)) started at 0, which makes GridSearchCV fail on the
# first candidate. Ranges now run 1..len(df_RPV_T)-1 (same upper bound).
# NOTE(review): 'linearregression__normalize' was removed in scikit-learn 1.2;
# drop it (or add a scaler step) when upgrading.
if preProcess == 'Scale Features' and modelChoice == 'PLSR':
    param_grid = [
        {'plsregression__n_components': range(1, len(df_RPV_T)),
         'standardscaler__with_mean': [True,False],
         'standardscaler__with_std': [True,False]}]
if preProcess == 'Scale Features' and modelChoice == 'PCR':
    param_grid = [
        {'pca__n_components': range(1, len(df_RPV_T)),
         'standardscaler__with_mean': [True,False],
         'standardscaler__with_std': [True,False]}]
if preProcess == 'No Scale' and modelChoice == 'PLSR':
    param_grid = [
        {'plsregression__n_components': range(1, len(df_RPV_T))}]
if preProcess == 'No Scale' and modelChoice == 'PCR':
    param_grid = [
        {'pca__n_components': range(1, len(df_RPV_T)),
         'linearregression__normalize': [True,False]}]
if preProcess == 'Scale Samples' and modelChoice == 'PLSR':
    param_grid = [
        {'plsregression__n_components': range(1, len(df_RPV_T)),
         'plsregression__scale': [True, False]}]
if preProcess == 'Scale Samples' and modelChoice == 'PCR':
    param_grid = [
        {'pca__n_components': range(1, len(df_RPV_T)),
         'linearregression__normalize': [True, False]}]
if preProcess == 'Normalize' and modelChoice == 'PLSR':
    param_grid = [
        {'plsregression__n_components': range(1, len(df_RPV_T)),
         'normalizer__norm': ['l1', 'l2', 'max']}]
if preProcess == 'Normalize' and modelChoice == 'PCR':
    param_grid = [
        {'pca__n_components': range(1, len(df_RPV_T)),
         'normalizer__norm': ['l1', 'l2', 'max']}]
# Build the pipeline for the chosen model and exhaustively search param_grid,
# scoring candidates by (negative) mean squared error under cvFolds-fold CV.
if modelChoice == 'PLSR':
    GSmodel = make_pipeline(preProcesser, PLSRegression(scale=False))
if modelChoice == 'PCR':
    GSmodel = make_pipeline(preProcesser, PCA(), LinearRegression())
grid_search = GridSearchCV(GSmodel, param_grid, cv=cvFolds,
                           scoring='neg_mean_squared_error',
                           return_train_score=True)
grid_search.fit(X,y);
print(grid_search.best_params_, grid_search.best_estimator_)
{'normalizer__norm': 'max', 'pca__n_components': 9} Pipeline(memory=None,
steps=[('normalizer', Normalizer(copy=True, norm='max')),
('pca',
PCA(copy=True, iterated_power='auto', n_components=9,
random_state=None, svd_solver='auto', tol=0.0,
whiten=False)),
('linearregression',
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,
normalize=False))],
verbose=False)
# Cross-validated R2 for selected component counts, swept over fold counts
# 2..cvFolds to see how fold choice affects the estimate.
for cvFold in list(range(2,cvFolds+1)):
    CV_var_exp_DA = np.zeros(len(df_RPV_T))
    CV_var_exp_HT = np.zeros(len(df_RPV_T))
    CV_var_exp_All = np.zeros(len(df_RPV_T))
    print(cvFold)
    # only 2, 3 and 5 components are evaluated; all other slots remain 0
    for i in 2,3,5:
        if modelChoice == 'PLSR':
            myModel = make_pipeline(preProcesser, PLSRegression(n_components=i,scale=False))
        if modelChoice == 'PCR':
            myModel = make_pipeline(preProcesser, PCA(n_components=i), LinearRegression())
        # out-of-fold predictions on the raw (unpreprocessed) spectra
        y_train_pred = cross_val_predict(myModel, X_raw, y, cv=cvFold)
        CV_var_exp_DA[i-1] = r2_score(y_DA,y_train_pred[:,0])
        CV_var_exp_HT[i-1] = r2_score(y_5HT,y_train_pred[:,1])
        CV_var_exp_All[i-1] = r2_score(y,y_train_pred)
    df_CV_var_exp_All=pd.DataFrame({"Number Components": i_values, "Variance Explained (%), All CV": np.round(100*CV_var_exp_All,2)})
    print(df_CV_var_exp_All,'\n');
    df_CV_var_exp_DA=pd.DataFrame({"Number Components": i_values, "Variance Explained (%), DA CV": np.round(100*CV_var_exp_DA,2)})
    print(df_CV_var_exp_DA,'\n');
    df_CV_var_exp_HT=pd.DataFrame({"Number Components": i_values, "Variance Explained (%), HT CV": np.round(100*CV_var_exp_HT,2)})
    print(df_CV_var_exp_HT);
    plt.plot(i_values,CV_var_exp_All,label='All CV');
    plt.plot(i_values,CV_var_exp_HT,label="HT CV");
    plt.plot(i_values,CV_var_exp_DA,label='DA CV');
    plt.legend()
    plt.title("Variance Explained by N Components during "+str(cvFold)+" Fold CV");
    plt.xlabel("Number Components");
    plt.ylabel("Variance Explained");
    plt.show()
2
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 -22.48
2 3 -99.99
3 4 0.00
4 5 -71.64
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 39.47
2 3 26.83
3 4 0.00
4 5 -177.24
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -84.42
2 3 -226.81
3 4 0.00
4 5 33.95
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
3
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 24.98
2 3 -183.47
3 4 0.00
4 5 4.29
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 54.58
2 3 24.82
3 4 0.00
4 5 2.96
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -4.62
2 3 -391.75
3 4 0.00
4 5 5.61
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
4
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 8.43
2 3 10.31
3 4 0.00
4 5 66.09
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 47.83
2 3 53.61
3 4 0.00
4 5 61.37
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -30.98
2 3 -33.00
3 4 0.00
4 5 70.82
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
5
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 -12.44
2 3 -13.43
3 4 0.00
4 5 59.95
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 38.19
2 3 49.34
3 4 0.00
4 5 64.62
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -63.06
2 3 -76.19
3 4 0.00
4 5 55.27
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
6
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 -17.95
2 3 -28.66
3 4 0.00
4 5 58.45
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 39.97
2 3 42.32
3 4 0.00
4 5 62.38
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -75.86
2 3 -99.64
3 4 0.00
4 5 54.53
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
7
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 0.95
2 3 -6.81
3 4 0.00
4 5 65.73
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 46.97
2 3 44.55
3 4 0.00
4 5 58.92
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -45.07
2 3 -58.17
3 4 0.00
4 5 72.54
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
8
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 3.97
2 3 -4.95
3 4 0.00
4 5 66.80
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 48.24
2 3 45.77
3 4 0.00
4 5 57.95
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -40.30
2 3 -55.67
3 4 0.00
4 5 75.65
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
9
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 5.91
2 3 1.37
3 4 0.00
4 5 67.83
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 48.19
2 3 47.85
3 4 0.00
4 5 57.86
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -36.37
2 3 -45.10
3 4 0.00
4 5 77.79
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
10
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 6.57
2 3 2.28
3 4 0.00
4 5 66.25
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 50.64
2 3 50.51
3 4 0.00
4 5 55.94
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -37.50
2 3 -45.95
3 4 0.00
4 5 76.56
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
11
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 3.33
2 3 -2.75
3 4 0.00
4 5 64.86
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 50.49
2 3 50.21
3 4 0.00
4 5 57.01
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -43.84
2 3 -55.71
3 4 0.00
4 5 72.70
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
12
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 4.45
2 3 -4.07
3 4 0.00
4 5 64.89
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 51.01
2 3 50.11
3 4 0.00
4 5 57.04
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -42.11
2 3 -58.24
3 4 0.00
4 5 72.75
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
13
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 4.41
2 3 -4.03
3 4 0.00
4 5 64.89
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 51.23
2 3 50.00
3 4 0.00
4 5 57.01
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -42.41
2 3 -58.07
3 4 0.00
4 5 72.76
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
14
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 4.01
2 3 -3.84
3 4 0.00
4 5 65.06
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 51.86
2 3 50.04
3 4 0.00
4 5 57.20
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -43.84
2 3 -57.71
3 4 0.00
4 5 72.92
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
15
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 4.18
2 3 -3.74
3 4 0.00
4 5 65.08
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 51.04
2 3 50.10
3 4 0.00
4 5 57.24
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -42.68
2 3 -57.58
3 4 0.00
4 5 72.92
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
16
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 4.13
2 3 -4.12
3 4 0.00
4 5 65.13
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 50.99
2 3 49.66
3 4 0.00
4 5 57.31
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -42.72
2 3 -57.91
3 4 0.00
4 5 72.95
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
17
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 3.13
2 3 -5.58
3 4 0.00
4 5 65.18
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 50.89
2 3 49.69
3 4 0.00
4 5 57.38
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -44.63
2 3 -60.85
3 4 0.00
4 5 72.98
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
18
Number Components Variance Explained (%), All CV
0 1 0.00
1 2 3.27
2 3 -5.33
3 4 0.00
4 5 65.12
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), DA CV
0 1 0.00
1 2 50.93
2 3 49.85
3 4 0.00
4 5 57.31
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
Number Components Variance Explained (%), HT CV
0 1 0.00
1 2 -44.39
2 3 -60.51
3 4 0.00
4 5 72.93
5 6 0.00
6 7 0.00
7 8 0.00
8 9 0.00
9 10 0.00
10 11 0.00
11 12 0.00
12 13 0.00
13 14 0.00
14 15 0.00
15 16 0.00
16 17 0.00
17 18 0.00
#R2X
# Reconstruction check: reduce X to nComponents latent dimensions, map back,
# and report how much of X is lost (1 - R2).
if modelChoice == 'PCR':
    pca = PCA(n_components=nComponents);
    X_reduced = pca.fit_transform(X);
    X_recovered = pca.inverse_transform(X_reduced);
if modelChoice == 'PLSR':
    plsr = PLSRegression(n_components=nComponents,scale=False);
    X_reduced = plsr.fit_transform(X,y);  # NOTE(review): returns an (x_scores, y_scores) tuple
    X_recovered = plsr.inverse_transform(plsr.x_scores_);
plt.plot(X.T);
plt.xlabel('Sample')
plt.ylabel('Preprocessed Current (nA)')
plt.title('Preprocessed Voltammogram')
plt.show();
plt.plot((X_recovered).T);
plt.xlabel('Sample')
plt.ylabel('Preprocessed Current (nA)')
plt.title('Reconstructed Preprocessed Voltammogram')
plt.show()
# Normalizer has no inverse_transform, so the back-transformed plots are
# only produced for the scaler-based preprocessing options.
if preProcess != 'Normalize':
    plt.plot((preProcesser.inverse_transform(X)).T);
    plt.xlabel('Sample')
    plt.ylabel('Current (nA)')
    plt.title('Voltammogram')
    plt.show();
    plt.plot((preProcesser.inverse_transform(X_recovered)).T);
    plt.xlabel('Sample')
    plt.ylabel('Current (nA)')
    plt.title('Reconstructed Voltammogram')
    plt.show()
print("Reconstruction Error (%):", 100*round(1-r2_score(X,X_recovered),4))
Reconstruction Error (%): 15.85
#R2Y
# Refit the current model on the raw spectra and evaluate training-set R2
# per analyte, then cross-check sklearn's r2_score against a hand computation.
if modelChoice =='PCR':
    myModel.fit(X_raw,y)
if modelChoice =='PLSR':
    myModel.fit_transform(X_raw,y)
y_pred = myModel.predict(X_raw)
df_PredConc=pd.DataFrame({"Predicted DA": np.round(y_pred[:,0],2),'Actual DA':np.round(y[:,0],2),
                          "Predicted 5HT": np.round(y_pred[:,1],2),'Actual 5HT':np.round(y[:,1],2)})
print(df_PredConc)
r2_score_DA = r2_score(y_DA,y_pred[:,0],multioutput='raw_values')
r2_score_5HT = r2_score(y_5HT,y_pred[:,1],multioutput='raw_values')
print('R2Y DA:',(100*np.round(r2_score_DA,4)))
print('R2Y 5HT:', (100*np.round(r2_score_5HT,4)))
print('R2Y All:',100*np.round(r2_score(y,y_pred),4))
plt.scatter(df_ConcAll["DA"],y_pred[:,0], label="DA");
plt.scatter(df_ConcAll["HT"],y_pred[:,1], label="HT");
plt.xlabel('Observed Concentration (uM)')
plt.ylabel('Predicted Concentration (uM)')
plt.title("Predicted versus Observed Concentration")
abline(1,0)  # identity line: perfect predictions fall on it
plt.plot();
plt.legend();
#debug code: recompute R2 by hand and compare with sklearn's value.
R2Y_DA = 1-(np.sum((y_pred[:,0]-y_DA)**2))/(np.sum((y_DA-np.mean(y_DA))**2))
R2Y_5HT = 1-(np.sum((y_pred[:,1]-y_5HT)**2))/(np.sum((y_5HT-np.mean(y_5HT))**2))
# FIX: the two computations order floating-point operations differently, so
# exact equality (==) can fail spuriously — compare with a tolerance instead.
assert np.allclose(R2Y_DA, r2_score_DA)
assert np.allclose(R2Y_5HT, r2_score_5HT)
Predicted DA Actual DA Predicted 5HT Actual 5HT 0 0.32 0.0 -0.51 0.0 1 3.65 5.0 0.34 0.0 2 0.41 0.0 -0.35 0.0 3 -0.02 0.0 3.28 4.0 4 -0.14 0.0 0.14 0.0 5 3.34 2.0 1.46 2.0 6 -0.01 0.0 -0.06 0.0 7 3.28 4.0 0.11 0.0 8 -0.09 0.0 -0.07 0.0 9 -0.48 0.0 3.86 3.0 10 -0.14 0.0 0.09 0.0 11 2.45 1.5 2.14 1.5 12 0.01 0.0 0.55 0.0 13 4.05 5.0 0.03 0.0 14 -0.05 0.0 0.89 0.0 15 -0.62 0.0 4.33 5.0 16 0.50 0.0 -0.44 0.0 17 3.03 2.0 1.70 2.0 R2Y DA: [85.16] R2Y 5HT: [89.96] R2Y All: 87.56
# Diagnostic learning curves for the fitted model (score vs. training size,
# fit time vs. size, score vs. fit time) drawn into the left column of axes.
fig, axes = plt.subplots(3, 2, figsize=(10, 15))
title = "Learning Curves (PLSR)"
# Cross validation with 10 random splits to get smoother mean test and train
# score curves, each time with 10% of the data held out for validation.
# NOTE(review): the original comment claimed 100 iterations and a 20% test
# size, which did not match the ShuffleSplit parameters below.
cv = ShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
estimator = myModel
plot_learning_curve(estimator, title, X_raw, y, axes=axes[:, 0], ylim=(-1, 1.01),
                    cv=cv, n_jobs=4)
plt.show()
# in vivo
# Time axes for the in-vivo recordings: samples arrive every 400 ms and
# sample 200 marks the stimulation start, so both axes are in seconds.
time_array = (np.arange(0, 492) - 200) * 400 / 1000
time_array_500 = (np.arange(0, 501) - 200) * 400 / 1000
#for P1 TP4
stimulationFiles = ['\\25800_P1','\\27800_P1','\\33850_P1',
'\\9300_P1_2','\\11250_P1_2','\\13180_P1_2','\\16280_P1_2','\\18000_P1_2','\\22080_P1_2','\\23800_P1_2','\\26400_P1_2','\\31900_P1_2',
'\\20100_postinj','\\22050_postinj','\\23950_postinj','\\28200_postinj','\\31550_postinj','\\33350_postinj','\\34900_postinj']
#stimulationFiles= ['\\14950_postESC', '\\17030_postESC']
# Object labels: drop the leading backslash and prefix with 'Obj'.
nameFiles = ['Obj' + path[1:] for path in stimulationFiles]
print(nameFiles)
objs = list()
current_dfs=[]
# Apply the calibrated model to every in-vivo stimulation recording,
# wrap the predicted traces in Sample objects annotated with stimulation
# metadata (frequency, length, electrode position, drug condition), and
# draw per-recording dopamine/serotonin figures.
for x in [2]:  # number of latent components to fit
    print(x)
    if modelChoice == 'PLSR':
        myModel = make_pipeline(preProcesser, PLSRegression(n_components=x,scale=False))
        myModel.fit_transform(X_raw,y)
    if modelChoice == 'PCR':
        myModel = make_pipeline(preProcesser, PCA(n_components=x), LinearRegression())
        myModel.fit(X_raw,y)
    j=0  # index into nameFiles, kept in lockstep with stimulationFiles
    for i in stimulationFiles:
        #iv_file_location=r'C:\Users\csmov\fscv_fpv_HYdata\in vivo Data FSCV_FPV_2020\in vivo Data FSCV_FPV_2020\20190112'
        iv_file_location=r'C:\Users\csmov\fscv_fpv_HYdata\in vivo Data FSCV_FPV_2020\in vivo Data FSCV_FPV_2020\20190113'
        iv_file_location=iv_file_location+i+'.xlsx'
        sheetNameIV = 'NBS NA PulseCut'
        df_RPV_iv = pd.read_excel(iv_file_location, sheet_name=sheetNameIV,header=None) #dataframe for voltammogram response
        df_RPV_iv_T = df_RPV_iv.T #transpose to get in correct format for sklearn
        X_iv = df_RPV_iv_T.to_numpy()
        # Predicted concentrations: column 0 = DA, column 1 = 5HT (uM).
        results = myModel.predict(X_iv)
        #print(pd.DataFrame({"Sample": df_RPV_iv_T.index,"Predicted DA (uM)": np.round(results[:,0],4),"Predicted 5HT (uM)": np.round(results[:,1],4)}))
        df_results=pd.DataFrame({"Predicted DA (uM)": np.round(results[:,0],4),"Predicted 5HT (uM)": np.round(results[:,1],4)})
        #globals()['df_results_'+str(i[1:])+'_'+str(x)+'comp'] = df_results
        df_results.name = str(i[1:])+'_'+str(x)+'comp'
        current_dfs.append(df_results)
        predDA = df_results['Predicted DA (uM)'].to_numpy()
        predHT = df_results['Predicted 5HT (uM)'].to_numpy()
        stimType='Pulse'
        numComps=x
        print(i)
        mySampleName = i
        behavior=0
        objs.append(Sample(myName=i+'_' + preProcess + ' ' + modelChoice + ' ' + str(x) + ' Components ',
                           myData=df_results,myTime=i,myModel=modelChoice,myNumComp=x,myPreProcess=preProcess,
                           mySampleName=nameFiles[j]))
        # Stimulation parameters by recording: two files were 40 Hz, the
        # rest 30 Hz; all used a 20 s stimulation.
        if mySampleName in ['\\33850_P1','\\11250_P1_2']:
            objs[-1].set_StimFreq(40)
            objs[-1].set_StimLength(20)
        if mySampleName in ['\\25800_P1','\\27800_P1','\\9300_P1_2','\\13180_P1_2','\\16280_P1_2', '\\18000_P1_2','\\22080_P1_2','\\23800_P1_2','\\26400_P1_2','\\31900_P1_2','\\20100_postinj','\\22050_postinj','\\23950_postinj','\\28200_postinj','\\31550_postinj','\\33350_postinj','\\34900_postinj']:
            objs[-1].set_StimFreq(30)
            objs[-1].set_StimLength(20)
        # Electrode (micromanipulator) position per recording group.
        if mySampleName in ['\\25800_P1','\\27800_P1','\\33850_P1']:
            objs[-1].set_Position(11803)
        if mySampleName in ['\\9300_P1_2','\\11250_P1_2', '\\13180_P1_2','\\16280_P1_2', '\\18000_P1_2']:
            objs[-1].set_Position(11950)
        if mySampleName in ['\\22080_P1_2','\\23800_P1_2', '\\26400_P1_2','\\31900_P1_2','\\20100_postinj','\\22050_postinj','\\23950_postinj','\\28200_postinj']:
            objs[-1].set_Position(12150)
        if mySampleName in ['\\33350_postinj','\\31550_postinj','\\34900_postinj']:
            objs[-1].set_Position(12350)
        # Drug condition. NOTE(review): '\\31550_postinj' is labelled
        # 'post-SSRI' here but is then overwritten to 'pre-SSRI' by the
        # next condition (it is missing from the 'not in' list), which is
        # what the dumped object state shows — confirm which is intended.
        if mySampleName in ['\\20100_postinj','\\22050_postinj','\\23950_postinj','\\28200_postinj','\\31550_postinj']:
            objs[-1].set_Drug('post-SSRI')
        if mySampleName not in ['\\20100_postinj', '\\22050_postinj', '\\23950_postinj', '\\28200_postinj']:
            objs[-1].set_Drug('pre-SSRI')
        # Per-recording figure: smoothed DA and 5HT on twin y-axes (nM),
        # with the 20 s stimulation window shaded.
        fig, ax1 = plt.subplots()
        ax1.plot(time_array,1000*moving_average(predDA,10),label='Dopamine',color='b')
        #above was time_array+7
        ax1.set_xlabel('Time Since Stimulation Start (s)',fontsize=16)
        ax1.tick_params(axis='x',labelsize=14)
        ax1.set_ylabel('Dopamine Concentration (nM)',fontSize=16)
        ax1.tick_params(axis='y',labelsize=14)
        ax2 = ax1.twinx()
        ax2.plot(time_array,1000*moving_average(predHT,10),label='Serotinin',color='m')
        #above was time_array+7
        ax2.axvspan(0, 20, color='gold', alpha=0.3)
        #ax2.legend(fontsize=12)
        ax2.set_xlabel('Time Since Stimulation Start (s)',fontsize=16)
        ax2.tick_params(axis='x',labelsize=14)
        ax2.set_ylabel('Serotonin Concentration (nM)',fontSize=16)
        ax2.tick_params(axis='y',labelsize=14)
        fig.legend(loc='upper right', bbox_to_anchor=(0.84, 0.85),fontsize=14)
        plt.title(str(i) + ' ' + preProcess + ' ' + modelChoice + ' ' + str(x) + ' Components ');
        fig = plt.gcf()
        fig.set_size_inches(9, 5)
        #plt.savefig(i[2:]+'1_3040_combined.jpeg',dpi=600,quality=95)
        plt.show()
        # Second figure: baseline-subtracted ("phasic") traces, where the
        # baseline is the mean of the first 101 samples (pre-stimulation).
        plt.plot(time_array,moving_average(predDA,10)-np.round(np.mean((df_results.iloc[:101,0]).to_numpy()),3),label='Dopamine',color='b');
        #above was time_array+3
        plt.plot(time_array,moving_average(predHT,10)-np.round(np.mean((df_results.iloc[:101,1]).to_numpy()),3),label='Serotonin',color='m');
        #above was time_array+3
        plt.axvspan(0, 20, color='gold', alpha=0.3);
        plt.legend(fontsize=12)
        plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
        plt.xticks(fontsize=12)
        plt.ylabel('Phasic Concentration, uM',fontSize=15)
        plt.yticks(fontsize=12)
        fig = plt.gcf()
        fig.set_size_inches(9, 5)
        plt.title(str(i) + ' ' + preProcess + ' ' + modelChoice + ' ' + str(x) + ' Components ');
        plt.show();
        j+=1
['Obj25800_P1', 'Obj27800_P1', 'Obj33850_P1', 'Obj9300_P1_2', 'Obj11250_P1_2', 'Obj13180_P1_2', 'Obj16280_P1_2', 'Obj18000_P1_2', 'Obj22080_P1_2', 'Obj23800_P1_2', 'Obj26400_P1_2', 'Obj31900_P1_2', 'Obj20100_postinj', 'Obj22050_postinj', 'Obj23950_postinj', 'Obj28200_postinj', 'Obj31550_postinj', 'Obj33350_postinj', 'Obj34900_postinj'] 2 \25800_P1
\27800_P1
\33850_P1
\9300_P1_2
\11250_P1_2
\13180_P1_2
\16280_P1_2
\18000_P1_2
\22080_P1_2
\23800_P1_2
\26400_P1_2
\31900_P1_2
\20100_postinj
\22050_postinj
\23950_postinj
\28200_postinj
\31550_postinj
\33350_postinj
\34900_postinj
# Dump every Sample's full attribute dictionary for a quick sanity check.
for sample in objs:
    print(vars(sample))
{'_Name': '\\25800_P1_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 1.8088 1.0523
1 1.8121 1.0535
2 1.8136 1.0533
3 1.8131 1.0536
4 1.8116 1.0535
.. ... ...
496 1.8227 1.0528
497 1.8224 1.0530
498 1.8244 1.0534
499 1.8270 1.0537
500 1.8114 1.0517
[501 rows x 2 columns], '_Time': '\\25800_P1', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 11803, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj25800_P1', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\27800_P1_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 1.8078 1.0543
1 1.8143 1.0553
2 1.8150 1.0558
3 1.8177 1.0555
4 1.8166 1.0555
.. ... ...
496 1.8264 1.0592
497 1.8252 1.0592
498 1.8232 1.0589
499 1.8200 1.0594
500 1.8105 1.0573
[501 rows x 2 columns], '_Time': '\\27800_P1', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 11803, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj27800_P1', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\33850_P1_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 1.8715 1.0801
1 1.8884 1.0815
2 1.8855 1.0823
3 1.8833 1.0822
4 1.8868 1.0817
.. ... ...
496 1.9198 1.0820
497 1.9215 1.0817
498 1.9183 1.0818
499 1.9129 1.0815
500 1.9006 1.0808
[501 rows x 2 columns], '_Time': '\\33850_P1', '_StimType': 0, '_StimLength': 20, '_StimFreq': 40, '_Position': 11803, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj33850_P1', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\9300_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0328 1.0517
1 2.0436 1.0524
2 2.0416 1.0523
3 2.0389 1.0519
4 2.0377 1.0520
.. ... ...
496 2.0539 1.0526
497 2.0552 1.0527
498 2.0562 1.0527
499 2.0567 1.0528
500 2.0374 1.0516
[501 rows x 2 columns], '_Time': '\\9300_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 11950, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj9300_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\11250_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0394 1.0522
1 2.0520 1.0526
2 2.0516 1.0526
3 2.0528 1.0529
4 2.0587 1.0536
.. ... ...
496 2.0694 1.0528
497 2.0641 1.0524
498 2.0664 1.0524
499 2.0648 1.0527
500 2.0482 1.0516
[501 rows x 2 columns], '_Time': '\\11250_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 40, '_Position': 11950, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj11250_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\13180_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0544 1.0554
1 2.0682 1.0564
2 2.0668 1.0562
3 2.0719 1.0566
4 2.0694 1.0563
.. ... ...
496 2.0857 1.0558
497 2.0832 1.0555
498 2.0849 1.0559
499 2.0845 1.0557
500 2.0728 1.0548
[501 rows x 2 columns], '_Time': '\\13180_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 11950, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj13180_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\16280_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0323 1.0498
1 2.0615 1.0529
2 2.0641 1.0529
3 2.0682 1.0533
4 2.0591 1.0522
.. ... ...
496 2.0716 1.0526
497 2.0711 1.0528
498 2.0677 1.0524
499 2.0694 1.0525
500 2.0551 1.0512
[501 rows x 2 columns], '_Time': '\\16280_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 11950, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj16280_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\18000_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0610 1.0535
1 2.0704 1.0551
2 2.0695 1.0551
3 2.0707 1.0552
4 2.0667 1.0550
.. ... ...
496 2.0703 1.0542
497 2.0798 1.0548
498 2.0786 1.0549
499 2.0708 1.0545
500 2.0511 1.0524
[501 rows x 2 columns], '_Time': '\\18000_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 11950, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj18000_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\22080_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0149 1.0771
1 2.0290 1.0783
2 2.0224 1.0782
3 2.0312 1.0786
4 2.0331 1.0787
.. ... ...
496 2.0460 1.0769
497 2.0462 1.0771
498 2.0514 1.0777
499 2.0501 1.0773
500 2.0264 1.0751
[501 rows x 2 columns], '_Time': '\\22080_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj22080_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\23800_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0292 1.0762
1 2.0424 1.0777
2 2.0430 1.0776
3 2.0440 1.0776
4 2.0461 1.0778
.. ... ...
496 2.0710 1.0770
497 2.0724 1.0773
498 2.0728 1.0772
499 2.0736 1.0771
500 2.0570 1.0761
[501 rows x 2 columns], '_Time': '\\23800_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj23800_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\26400_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0731 1.0780
1 2.0897 1.0797
2 2.0859 1.0792
3 2.0844 1.0793
4 2.0871 1.0799
.. ... ...
496 2.1021 1.0802
497 2.1009 1.0799
498 2.1072 1.0806
499 2.1016 1.0803
500 2.0906 1.0795
[501 rows x 2 columns], '_Time': '\\26400_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj26400_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\31900_P1_2_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.0958 1.0796
1 2.1134 1.0804
2 2.1119 1.0804
3 2.1148 1.0805
4 2.1070 1.0797
.. ... ...
496 2.1342 1.0803
497 2.1369 1.0806
498 2.1379 1.0805
499 2.1367 1.0800
500 2.1199 1.0791
[501 rows x 2 columns], '_Time': '\\31900_P1_2', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj31900_P1_2', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\20100_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.1555 1.0754
1 2.1666 1.0758
2 2.1669 1.0760
3 2.1678 1.0761
4 2.1708 1.0764
.. ... ...
496 2.1835 1.0768
497 2.1844 1.0769
498 2.1813 1.0765
499 2.1846 1.0767
500 2.1674 1.0755
[501 rows x 2 columns], '_Time': '\\20100_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj20100_postinj', '_Drug': 'post-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\22050_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.1669 1.0761
1 2.1803 1.0771
2 2.1849 1.0773
3 2.1858 1.0774
4 2.1837 1.0780
.. ... ...
496 2.1925 1.0767
497 2.1995 1.0774
498 2.1974 1.0773
499 2.1987 1.0773
500 2.1852 1.0763
[501 rows x 2 columns], '_Time': '\\22050_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj22050_postinj', '_Drug': 'post-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\23950_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.1882 1.0777
1 2.2051 1.0789
2 2.2030 1.0787
3 2.2059 1.0788
4 2.1938 1.0775
.. ... ...
496 2.2044 1.0781
497 2.2076 1.0786
498 2.2045 1.0786
499 2.2054 1.0789
500 2.1767 1.0766
[501 rows x 2 columns], '_Time': '\\23950_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj23950_postinj', '_Drug': 'post-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\28200_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.1751 1.0788
1 2.1961 1.0801
2 2.1905 1.0796
3 2.1940 1.0802
4 2.1859 1.0793
.. ... ...
496 2.1425 1.0555
497 2.1411 1.0551
498 2.1409 1.0553
499 2.1397 1.0549
500 2.1306 1.0533
[501 rows x 2 columns], '_Time': '\\28200_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12150, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj28200_postinj', '_Drug': 'post-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\31550_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.1666 1.0536
1 2.1769 1.0550
2 2.1763 1.0550
3 2.1762 1.0548
4 2.1698 1.0543
.. ... ...
496 2.1818 1.0573
497 2.1840 1.0573
498 2.1824 1.0572
499 2.1811 1.0572
500 2.1710 1.0558
[501 rows x 2 columns], '_Time': '\\31550_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12350, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj31550_postinj', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\33350_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.1959 1.0609
1 2.1973 1.0610
2 2.1948 1.0609
3 2.1957 1.0610
4 2.1968 1.0609
.. ... ...
496 2.2013 1.0609
497 2.2046 1.0607
498 2.2033 1.0608
499 2.1957 1.0605
500 2.2066 1.0601
[501 rows x 2 columns], '_Time': '\\33350_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12350, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj33350_postinj', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
{'_Name': '\\34900_postinj_Normalize PCR 2 Components ', '_Data': Predicted DA (uM) Predicted 5HT (uM)
0 2.2250 1.0627
1 2.2150 1.0627
2 2.2152 1.0626
3 2.2334 1.0636
4 2.2352 1.0633
.. ... ...
496 2.2284 1.0646
497 2.2286 1.0646
498 2.2259 1.0642
499 2.2079 1.0625
500 2.1969 1.0612
[501 rows x 2 columns], '_Time': '\\34900_postinj', '_StimType': 0, '_StimLength': 20, '_StimFreq': 30, '_Position': 12350, '_Behavior': 0, '_Model': 'PCR', '_NumComp': 2, '_PreProcess': 'Normalize', '_HyperParams': 0, '_SampleName': 'Obj34900_postinj', '_Drug': 'pre-SSRI', '_AlignedData': 0, '_BrainRegion': 0, '_5HT_Basal': 0, '_5HT_Phasic': 0, '_DA_Basal': 0, '_DA_Phasic': 0}
# Cache on each Sample the raw ("basal") predicted traces and the mean of
# the first 101 samples (pre-stimulation baseline, rounded to 3 decimals).
for sample in objs:
    da_series = sample._Data['Predicted DA (uM)']
    ht_series = sample._Data['Predicted 5HT (uM)']
    sample._DA_Basal = da_series.to_numpy()
    sample._DA_Phasic = np.round(np.mean(da_series.iloc[:101].to_numpy()), 3)
    sample._5HT_Basal = ht_series.to_numpy()
    sample._5HT_Phasic = np.round(np.mean(ht_series.iloc[:101].to_numpy()), 3)
# Overlay every recording's dopamine trace in one figure.
for sample in objs:
    trace_label = str(sample._NumComp) + ' Components ' + str(sample._SampleName)
    plt.plot(sample._DA_Basal, label=trace_label)
plt.legend()
plt.show()
# Overlay every recording's serotonin trace in one figure.
for sample in objs:
    trace_label = str(sample._NumComp) + ' Components ' + str(sample._SampleName)
    plt.plot(sample._5HT_Basal, label=trace_label)
plt.legend()
plt.show()
# One serotonin figure per recording, comparing component counts.
for fname in stimulationFiles:
    target = 'Obj' + fname[1:]
    for sample in objs:
        if str(sample._SampleName) == target:
            plt.plot(sample._5HT_Basal, label=str(sample._NumComp) + ' Components')
    plt.legend()
    plt.title(fname + 'Predicted Serotonin')
    plt.show()
# One dopamine figure per recording, comparing component counts.
for fname in stimulationFiles:
    target = 'Obj' + fname[1:]
    for sample in objs:
        if str(sample._SampleName) == target:
            plt.plot(sample._DA_Basal, label=str(sample._NumComp) + ' Components')
    plt.legend()
    plt.title(fname + 'Predicted Dopamine')
    plt.show()
# Label each Sample with a brain region based on electrode depth, and mark
# the one un-stimulated control recording.
for sample in objs:
    sample._BrainRegion = 'Dorsal' if sample._Position >= 12200 else 'Ventral'
    if sample._Time == '\\23950_postinj':
        sample._StimType = 'None'
# Colour key per electrode (micromanipulator) position.
color_dict = {'11803':'r','11950':'g','12150':'b','12350':'m'}
# Peak-align each 2-component dopamine trace (shift so its smoothed maximum
# lands at a common time), store the aligned trace, and overlay all of them.
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp==2:
                # Peak of the 10-sample moving average of the DA trace.
                m = max(moving_average(i._Data['Predicted DA (uM)'].to_numpy(),10))
                # All indices where the smoothed trace attains its maximum.
                time_array_offset=([k for k, l in enumerate(moving_average(i._Data['Predicted DA (uM)'].to_numpy(),10)) if l == m])
                # Late peaks (>= sample 300) are clamped to a fixed offset.
                # NOTE(review): when the clamp does not fire, the offset
                # stays a one-element list that is broadcast in the
                # arithmetic below, unlike the scalar used by the later
                # serotonin blocks — confirm this is intentional.
                if int(time_array_offset[0]) >= 300:
                    time_array_offset = 230
                time_array = np.arange(0,492)
                # 400 ms per sample; +20 re-centres on the stimulation
                # start and +11 is an empirical shift (seconds).
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                MA_Data = moving_average(i._Data['Predicted DA (uM)'].to_numpy(),10)
                # Aligned trace indexed by aligned time, used by the
                # position-averaged figures below.
                i._AlignedData = pd.DataFrame(MA_Data,time_array)
                plt.plot(time_array+11,MA_Data,label=i._BrainRegion+' '+str(i._Position-9000),color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Dopamine, 2 Component '+modelChoice,fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
plt.ylabel('Concentration, uM',fontSize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
#removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Mean +/- SD dopamine response grouped by electrode position
# (2-component model). Each group concatenates the aligned traces
# column-wise and averages across recordings.
# NOTE(review): DataFrame.std/mean with axis=1, level=0 was deprecated and
# removed in pandas 2.0 — confirm the pinned pandas version or migrate to
# a groupby on the column level.
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        if i._Position == 11803:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
# Shaded +/- 1 SD band around the group mean.
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='r',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 2803, N=3',color='r')
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        if i._Position == 11950:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='g',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 2950, N=5',color='g')
plt.legend()
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        # The un-stimulated control recording is excluded from this group.
        if i._StimType != 'None':
            if i._Position == 12150:
                listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='b',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 3150, N=7',color='b')
plt.legend()
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        if i._Position == 12350:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='m',alpha=0.1)
plt.plot(Average_byPosition,label='Dorsal 3350, N=3',color='m')
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
plt.ylabel('Concentration, uM',fontSize=15)
plt.yticks(fontsize=12)
plt.title('Predicted Average Striatal Dopamine, 2 Component '+modelChoice,fontsize=18)
fig = plt.gcf()
fig.set_size_inches(9, 5)
#removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Peak-align each 2-component serotonin trace and overlay all recordings,
# colour-coded by electrode position.
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp==2:
                # Smoothed trace (10-sample moving average) and its peak.
                smoothed = moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(),10)
                m = max(smoothed)
                # First index at which the smoothed trace hits its maximum.
                time_array_offset = [k for k, l in enumerate(smoothed) if l == m][0]
                # BUG FIX: the original clamped late peaks (>= sample 300)
                # to 230 *before* extracting the scalar with [0]; that
                # branch then crashed with "'int' object is not
                # subscriptable". Extract the scalar first, clamp second.
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0,492)
                # 400 ms per sample; +20 re-centres on the stimulation
                # start and +11 s is the empirical alignment shift.
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                plt.plot(time_array, smoothed,
                         label=i._BrainRegion+' '+str(i._Position-9000),
                         color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Serotonin, 2 Component '+modelChoice,fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
# FIX: 'fontSize' is not a valid kwarg in modern (case-sensitive) matplotlib.
plt.ylabel('Concentration, uM',fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# Collapse duplicate legend entries (one per position, not per recording).
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Same peak alignment for the 2-component serotonin traces, additionally
# storing the aligned trace on each Sample for the averaged figures below.
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp==2:
                smoothed = moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(),10)
                m = max(smoothed)
                # First index at which the smoothed trace hits its maximum.
                time_array_offset = [k for k, l in enumerate(smoothed) if l == m][0]
                # BUG FIX (same as the preceding block): clamp after the
                # scalar extraction, not before, so a peak at sample >= 300
                # no longer raises "'int' object is not subscriptable".
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0,492)
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                MA_Data = smoothed
                # Aligned trace indexed by the aligned time axis.
                i._AlignedData = pd.DataFrame(MA_Data,time_array)
                plt.plot(time_array+11,MA_Data,label=i._BrainRegion+' '+str(i._Position-9000),color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Serotonin, 2 Component '+modelChoice,fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
# FIX: 'fontSize' is not a valid kwarg in modern (case-sensitive) matplotlib.
plt.ylabel('Concentration, uM',fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# Collapse duplicate legend entries (one per position, not per recording).
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Mean +/- SD serotonin response grouped by electrode position
# (2-component model) — mirrors the dopamine version above.
# NOTE(review): std/mean with axis=1, level=0 was removed in pandas 2.0.
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        if i._Position == 11803:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='r',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 2803, N=3',color='r')
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        if i._Position == 11950:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='g',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 2950, N=5',color='g')
plt.legend()
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        # The un-stimulated control recording is excluded from this group.
        if i._StimType != 'None':
            if i._Position == 12150:
                listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='b',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 3150, N=7',color='b')
plt.legend()
listDF_byPosition=[]
for i in objs:
    if i._NumComp==2:
        if i._Position == 12350:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
# Full dump of the concatenated group for inspection.
print(Average_byPosition.to_string())
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='m',alpha=0.1)
plt.plot(Average_byPosition,label='Dorsal 3350, N=3',color='m')
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
plt.ylabel('Concentration, uM',fontSize=15)
plt.yticks(fontsize=12)
plt.title('Predicted Average Striatal Serotonin, 2 Component '+modelChoice,fontsize=18)
fig = plt.gcf()
fig.set_size_inches(9, 5)
#removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Peak-align 3-component serotonin traces (mirrors the 2-component block).
# NOTE(review): only x in [2] was fitted above, so no Sample has
# _NumComp==3 in the current run and this figure will be empty — confirm
# whether the 3-component pass was meant to be re-enabled.
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp==3:
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(),10))
                # All indices where the smoothed trace attains its maximum.
                time_array_offset=([k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(),10)) if l == m])
                # NOTE(review): when the clamp does not fire,
                # time_array_offset stays a one-element list that is
                # broadcast in the arithmetic below — confirm.
                if int(time_array_offset[0]) >= 300:
                    time_array_offset = 230
                time_array = np.arange(0,492)
                time_array = (time_array - time_array_offset+20)*1*400/1000
                MA_Data = moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(),10)
                i._AlignedData = pd.DataFrame(MA_Data,time_array)
                plt.plot(time_array+11,MA_Data,label=i._BrainRegion+' '+str(i._Position-9000),color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Serotonin, 3 Component '+modelChoice,fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
plt.ylabel('Concentration, uM',fontSize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
#removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Mean +/- SD serotonin response by electrode position for the
# 3-component model.
# NOTE(review): with only 2-component Samples in the current run, each
# listDF_byPosition stays empty and pd.concat([]) raises ValueError —
# confirm this block assumes a 3-component pass was run.
listDF_byPosition=[]
for i in objs:
    if i._NumComp==3:
        if i._Position == 11803:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='r',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 2803, N=3',color='r')
listDF_byPosition=[]
for i in objs:
    if i._NumComp==3:
        if i._Position == 11950:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='g',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 2950, N=5',color='g')
plt.legend()
listDF_byPosition=[]
for i in objs:
    if i._NumComp==3:
        # The un-stimulated control recording is excluded from this group.
        if i._StimType != 'None':
            if i._Position == 12150:
                listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='b',alpha=0.1)
plt.plot(Average_byPosition,label='Ventral 3150, N=7',color='b')
plt.legend()
listDF_byPosition=[]
for i in objs:
    if i._NumComp==3:
        if i._Position == 12350:
            listDF_byPosition.append(i._AlignedData)
Average_byPosition=pd.concat(listDF_byPosition,axis=1)
Std_byPosition = Average_byPosition.std(axis=1, level=0)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.fill_between(list(Average_byPosition.index.values),Average_byPosition[0]-Std_byPosition[0],Average_byPosition[0]+Std_byPosition[0],color='m',alpha=0.1)
plt.plot(Average_byPosition,label='Dorsal 3350, N=3',color='m')
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3);
plt.xlabel('Time Since Stimulation Start (s)',fontsize=15)
plt.xticks(fontsize=12)
plt.ylabel('Concentration, uM',fontSize=15)
plt.yticks(fontsize=12)
plt.title('Predicted Average Striatal Serotonin, 3 Component '+modelChoice,fontsize=18)
fig = plt.gcf()
fig.set_size_inches(9, 5)
#removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- 3-component model: every stimulated recording, aligned on its 5HT peak ---
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 3:
                # Peak of the 10-point moving average defines t=0 alignment.
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)) if l == m]
                # fixed: take the first matching index as a scalar up front; a
                # tied maximum previously left a multi-element list that broke
                # the broadcast below.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    # Implausibly late peak -> fall back to a fixed offset.
                    time_array_offset = 230
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample), peak at t=20 s.
                time_array = (time_array - time_array_offset+20)*1*400/1000
                plt.plot(time_array, moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10), label=i._BrainRegion+' '+str(i._Position-9000), color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Serotonin, 3 Component '+modelChoice, fontsize=18)
# Shade the stimulation window.
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- 2-component model: stimulated recordings colored by drug condition ---
color_dict = {'pre-SSRI':'r','post-SSRI':'g'}
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 2:
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)) if l == m]
                # fixed: extract the scalar index before clamping; previously,
                # when the first clamp replaced the list with int 200, the
                # second `time_array_offset[0]` raised TypeError.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 200
                if time_array_offset <= 100:
                    time_array_offset = 200
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample).
                time_array = (time_array - time_array_offset+20)*1*400/1000
                plt.plot(time_array, moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10), label=i._Drug + str(i._StimFreq), color=color_dict[str(i._Drug)])
plt.title('Predicted Striatal Serotonin, 2 Component '+modelChoice, fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- 3-component model: stimulated recordings colored by drug condition ---
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 3:
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)) if l == m]
                # fixed: extract the scalar index before clamping; previously,
                # when the first clamp replaced the list with int 230, the
                # second `time_array_offset[0]` raised TypeError.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 230
                if time_array_offset <= 100:
                    time_array_offset = 200
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample).
                time_array = (time_array - time_array_offset+20)*1*400/1000
                plt.plot(time_array, moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10), label=i._Drug + str(i._StimFreq), color=color_dict[str(i._Drug)])
# fixed: missing space before modelChoice (title previously rendered as
# "...3 ComponentPLSR"), matching the 2-component figure's title format.
plt.title('Predicted Striatal Serotonin, 3 Component '+modelChoice, fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
0 0 0 -82.2 NaN NaN 1.06267 -81.8 NaN NaN 1.06255 -81.4 NaN NaN 1.06254 -81.0 NaN NaN 1.06251 -80.6 NaN NaN 1.06237 -80.2 NaN NaN 1.06225 -79.8 NaN NaN 1.06217 -79.4 NaN NaN 1.06221 -79.0 NaN NaN 1.06220 -78.6 NaN NaN 1.06221 -78.2 NaN NaN 1.06221 -77.8 NaN NaN 1.06219 -77.4 NaN NaN 1.06218 -77.0 NaN NaN 1.06228 -76.6 NaN NaN 1.06217 -76.2 NaN NaN 1.06220 -75.8 NaN 1.06079 1.06235 -75.4 NaN 1.06061 1.06235 -75.0 NaN 1.06056 1.06235 -74.6 NaN 1.06059 1.06235 -74.2 NaN 1.06057 1.06243 -73.8 NaN 1.06054 1.06248 -73.4 NaN 1.06060 1.06249 -73.0 NaN 1.06055 1.06242 -72.6 NaN 1.06048 1.06261 -72.2 NaN 1.06041 1.06270 -71.8 NaN 1.06035 1.06265 -71.4 NaN 1.06031 1.06259 -71.0 NaN 1.06031 1.06257 -70.6 1.05451 1.06024 1.06253 -70.2 1.05455 1.06022 1.06256 -69.8 1.05455 1.06023 1.06256 -69.4 1.05453 1.06008 1.06256 -69.0 1.05453 1.06008 1.06259 -68.6 1.05448 1.06015 1.06265 -68.2 1.05440 1.06019 1.06258 -67.8 1.05435 1.06025 1.06252 -67.4 1.05432 1.06047 1.06245 -67.0 1.05433 1.06057 1.06249 -66.6 1.05430 1.06071 1.06257 -66.2 1.05422 1.06085 1.06242 -65.8 1.05420 1.06099 1.06233 -65.4 1.05423 1.06113 1.06230 -65.0 1.05424 1.06123 1.06239 -64.6 1.05437 1.06130 1.06234 -64.2 1.05440 1.06144 1.06247 -63.8 1.05443 1.06148 1.06256 -63.4 1.05444 1.06140 1.06274 -63.0 1.05448 1.06141 1.06273 -62.6 1.05452 1.06140 1.06268 -62.2 1.05457 1.06141 1.06280 -61.8 1.05460 1.06144 1.06290 -61.4 1.05460 1.06138 1.06296 -61.0 1.05460 1.06143 1.06284 -60.6 1.05465 1.06150 1.06293 -60.2 1.05466 1.06151 1.06282 -59.8 1.05468 1.06154 1.06297 -59.4 1.05468 1.06153 1.06282 -59.0 1.05470 1.06155 1.06278 -58.6 1.05478 1.06155 1.06278 -58.2 1.05475 1.06151 1.06275 -57.8 1.05478 1.06145 1.06278 -57.4 1.05485 1.06152 1.06277 -57.0 1.05496 1.06149 1.06273 -56.6 1.05495 1.06143 1.06275 -56.2 1.05501 1.06134 1.06274 -55.8 1.05514 1.06129 1.06249 -55.4 1.05525 1.06129 1.06248 -55.0 1.05534 1.06128 1.06265 -54.6 1.05544 1.06129 1.06264 -54.2 1.05561 1.06128 1.06278 -53.8 1.05572 1.06123 1.06273 -53.4 1.05581 
1.06118 1.06274 -53.0 1.05586 1.06116 1.06295 -52.6 1.05591 1.06115 1.06282 -52.2 1.05596 1.06116 1.06289 -51.8 1.05599 1.06115 1.06301 -51.4 1.05602 1.06110 1.06313 -51.0 1.05604 1.06102 1.06305 -50.6 1.05598 1.06091 1.06317 -50.2 1.05592 1.06084 1.06309 -49.8 1.05586 1.06069 1.06310 -49.4 1.05585 1.06062 1.06314 -49.0 1.05596 1.06051 1.06301 -48.6 1.05603 1.06042 1.06303 -48.2 1.05608 1.06038 1.06299 -47.8 1.05615 1.06038 1.06294 -47.4 1.05623 1.06036 1.06286 -47.0 1.05631 1.06034 1.06281 -46.6 1.05641 1.06018 1.06272 -46.2 1.05643 1.06017 1.06272 -45.8 1.05652 1.06025 1.06263 -45.4 1.05657 1.06028 1.06256 -45.0 1.05653 1.06030 1.06256 -44.6 1.05653 1.06029 1.06250 -44.2 1.05661 1.06028 1.06244 -43.8 1.05658 1.06024 1.06241 -43.4 1.05654 1.06033 1.06241 -43.0 1.05648 1.06031 1.06240 -42.6 1.05646 1.06041 1.06237 -42.2 1.05652 1.06036 1.06234 -41.8 1.05647 1.06036 1.06241 -41.4 1.05637 1.06036 1.06240 -41.0 1.05626 1.06035 1.06235 -40.6 1.05620 1.06035 1.06237 -40.2 1.05613 1.06032 1.06240 -39.8 1.05617 1.06027 1.06247 -39.4 1.05617 1.06013 1.06242 -39.0 1.05618 1.06012 1.06234 -38.6 1.05621 1.06011 1.06231 -38.2 1.05626 1.06012 1.06228 -37.8 1.05632 1.06012 1.06224 -37.4 1.05640 1.06004 1.06218 -37.0 1.05647 1.06003 1.06213 -36.6 1.05656 1.06001 1.06208 -36.2 1.05661 1.06006 1.06202 -35.8 1.05660 1.06010 1.06193 -35.4 1.05659 1.06008 1.06193 -35.0 1.05660 1.06006 1.06198 -34.6 1.05656 1.06009 1.06198 -34.2 1.05648 1.06010 1.06194 -33.8 1.05645 1.06010 1.06187 -33.4 1.05642 1.06017 1.06190 -33.0 1.05636 1.06018 1.06193 -32.6 1.05630 1.06021 1.06186 -32.2 1.05626 1.06016 1.06184 -31.8 1.05619 1.06017 1.06188 -31.4 1.05615 1.06022 1.06192 -31.0 1.05611 1.06020 1.06219 -30.6 1.05610 1.06023 1.06246 -30.2 1.05607 1.06017 1.06248 -29.8 1.05604 1.06015 1.06251 -29.4 1.05602 1.06008 1.06254 -29.0 1.05603 1.06004 1.06256 -28.6 1.05602 1.05997 1.06267 -28.2 1.05597 1.05997 1.06266 -27.8 1.05597 1.05992 1.06262 -27.4 1.05592 1.05988 1.06257 -27.0 1.05590 1.05985 1.06226 
-26.6 1.05588 1.05963 1.06200 -26.2 1.05581 1.05945 1.06202 -25.8 1.05567 1.05924 1.06205 -25.4 1.05568 1.05922 1.06206 -25.0 1.05563 1.05903 1.06204 -24.6 1.05556 1.05885 1.06202 -24.2 1.05556 1.05864 1.06206 -23.8 1.05559 1.05860 1.06205 -23.4 1.05565 1.05861 1.06207 -23.0 1.05566 1.05862 1.06206 -22.6 1.05566 1.05877 1.06202 -22.2 1.05576 1.05901 1.06231 -21.8 1.05590 1.05921 1.06224 -21.4 1.05589 1.05914 1.06219 -21.0 1.05592 1.05933 1.06218 -20.6 1.05596 1.05956 1.06217 -20.2 1.05597 1.05973 1.06219 -19.8 1.05600 1.05974 1.06214 -19.4 1.05602 1.05973 1.06216 -19.0 1.05605 1.05977 1.06220 -18.6 1.05606 1.05979 1.06217 -18.2 1.05604 1.05977 1.06181 -17.8 1.05598 1.05979 1.06187 -17.4 1.05597 1.05994 1.06194 -17.0 1.05602 1.05996 1.06196 -16.6 1.05609 1.05998 1.06203 -16.2 1.05610 1.06006 1.06208 -15.8 1.05603 1.06013 1.06217 -15.4 1.05604 1.06018 1.06222 -15.0 1.05603 1.06023 1.06229 -14.6 1.05599 1.06023 1.06237 -14.2 1.05584 1.06026 1.06239 -13.8 1.05570 1.06026 1.06234 -13.4 1.05549 1.06035 1.06235 -13.0 1.05541 1.06036 1.06237 -12.6 1.05535 1.06037 1.06230 -12.2 1.05527 1.06036 1.06224 -11.8 1.05518 1.06036 1.06216 -11.4 1.05509 1.06028 1.06214 -11.0 1.05505 1.06026 1.06213 -10.6 1.05508 1.06029 1.06222 -10.2 1.05518 1.06030 1.06228 -9.8 1.05534 1.06027 1.06226 -9.4 1.05551 1.06017 1.06222 -9.0 1.05557 1.06015 1.06222 -8.6 1.05557 1.06006 1.06228 -8.2 1.05562 1.06004 1.06227 -7.8 1.05571 1.05998 1.06227 -7.4 1.05575 1.06016 1.06226 -7.0 1.05577 1.06010 1.06229 -6.6 1.05576 1.06007 1.06242 -6.2 1.05580 1.06005 1.06251 -5.8 1.05585 1.06007 1.06277 -5.4 1.05586 1.06007 1.06281 -5.0 1.05581 1.06014 1.06293 -4.6 1.05580 1.06024 1.06304 -4.2 1.05574 1.06034 1.06315 -3.8 1.05576 1.06045 1.06342 -3.4 1.05577 1.06033 1.06366 -3.0 1.05578 1.06046 1.06378 -2.6 1.05581 1.06058 1.06366 -2.2 1.05586 1.06069 1.06378 -1.8 1.05555 1.06079 1.06391 -1.4 1.05558 1.06125 1.06420 -1.0 1.05597 1.06171 1.06452 -0.6 1.05630 1.06221 1.06484 -0.2 1.05671 1.06264 1.06518 0.2 1.05703 
1.06306 1.06554 0.6 1.05737 1.06352 1.06574 1.0 1.05770 1.06388 1.06605 1.4 1.05803 1.06419 1.06629 1.8 1.05837 1.06450 1.06663 2.2 1.05902 1.06483 1.06704 2.6 1.05939 1.06482 1.06728 3.0 1.05942 1.06475 1.06737 3.4 1.05952 1.06466 1.06727 3.8 1.05977 1.06462 1.06730 4.2 1.05992 1.06461 1.06730 4.6 1.06014 1.06470 1.06731 5.0 1.06039 1.06478 1.06738 5.4 1.06067 1.06496 1.06760 5.8 1.06098 1.06515 1.06752 6.2 1.06135 1.06533 1.06720 6.6 1.06176 1.06555 1.06711 7.0 1.06223 1.06580 1.06708 7.4 1.06272 1.06609 1.06720 7.8 1.06301 1.06640 1.06730 8.2 1.06356 1.06675 1.06731 8.6 1.06404 1.06709 1.06751 9.0 1.06454 1.06746 1.06755 9.4 1.06504 1.06784 1.06767 9.8 1.06551 1.06826 1.06788 10.2 1.06600 1.06869 1.06827 10.6 1.06648 1.06914 1.06852 11.0 1.06693 1.06952 1.06875 11.4 1.06740 1.06994 1.06905 11.8 1.06795 1.07037 1.06937 12.2 1.06836 1.07079 1.06973 12.6 1.06876 1.07122 1.06998 13.0 1.06922 1.07166 1.07027 13.4 1.06965 1.07207 1.07060 13.8 1.07012 1.07242 1.07094 14.2 1.07056 1.07275 1.07127 14.6 1.07096 1.07309 1.07162 15.0 1.07138 1.07349 1.07206 15.4 1.07179 1.07382 1.07242 15.8 1.07224 1.07411 1.07287 16.2 1.07259 1.07441 1.07311 16.6 1.07298 1.07471 1.07339 17.0 1.07331 1.07500 1.07375 17.4 1.07364 1.07526 1.07400 17.8 1.07402 1.07552 1.07426 18.2 1.07433 1.07582 1.07456 18.6 1.07460 1.07604 1.07479 19.0 1.07471 1.07615 1.07484 19.4 1.07469 1.07612 1.07478 19.8 1.07452 1.07592 1.07440 20.2 1.07419 1.07554 1.07404 20.6 1.07370 1.07506 1.07361 21.0 1.07304 1.07431 1.07289 21.4 1.07220 1.07340 1.07203 21.8 1.07058 1.07235 1.07107 22.2 1.06933 1.07117 1.07000 22.6 1.06799 1.06995 1.06897 23.0 1.06668 1.06873 1.06801 23.4 1.06534 1.06758 1.06718 23.8 1.06399 1.06658 1.06644 24.2 1.06287 1.06567 1.06591 24.6 1.06185 1.06480 1.06533 25.0 1.06090 1.06416 1.06493 25.4 1.06008 1.06366 1.06462 25.8 1.05993 1.06329 1.06442 26.2 1.05942 1.06305 1.06427 26.6 1.05905 1.06280 1.06421 27.0 1.05874 1.06267 1.06415 27.4 1.05845 1.06256 1.06405 27.8 1.05836 1.06246 1.06402 28.2 
1.05820 1.06242 1.06393 28.6 1.05807 1.06236 1.06391 29.0 1.05791 1.06230 1.06394 29.4 1.05773 1.06223 1.06402 29.8 1.05767 1.06216 1.06402 30.2 1.05763 1.06205 1.06399 30.6 1.05757 1.06189 1.06389 31.0 1.05752 1.06182 1.06379 31.4 1.05751 1.06173 1.06370 31.8 1.05737 1.06166 1.06359 32.2 1.05734 1.06154 1.06349 32.6 1.05732 1.06142 1.06344 33.0 1.05735 1.06133 1.06338 33.4 1.05738 1.06124 1.06329 33.8 1.05734 1.06114 1.06319 34.2 1.05727 1.06115 1.06311 34.6 1.05722 1.06124 1.06306 35.0 1.05717 1.06121 1.06299 35.4 1.05711 1.06125 1.06294 35.8 1.05716 1.06128 1.06292 36.2 1.05715 1.06134 1.06289 36.6 1.05715 1.06133 1.06275 37.0 1.05713 1.06137 1.06261 37.4 1.05718 1.06147 1.06255 37.8 1.05710 1.06159 1.06256 38.2 1.05718 1.06164 1.06257 38.6 1.05714 1.06165 1.06263 39.0 1.05713 1.06163 1.06271 39.4 1.05719 1.06161 1.06283 39.8 1.05704 1.06156 1.06294 40.2 1.05699 1.06157 1.06307 40.6 1.05690 1.06158 1.06331 41.0 1.05687 1.06159 1.06355 41.4 1.05673 1.06152 1.06373 41.8 1.05668 1.06151 1.06396 42.2 1.05655 1.06144 1.06420 42.6 1.05646 1.06134 1.06425 43.0 1.05644 1.06134 1.06434 43.4 1.05644 1.06127 1.06436 43.8 1.05659 1.06126 1.06438 44.2 1.05662 1.06120 1.06434 44.6 1.05667 1.06116 1.06426 45.0 1.05672 1.06109 1.06418 45.4 1.05680 1.06106 1.06413 45.8 1.05690 1.06088 1.06402 46.2 1.05704 1.06060 1.06376 46.6 1.05717 1.06053 1.06374 47.0 1.05722 1.06043 1.06367 47.4 1.05723 1.06045 1.06364 47.8 1.05716 1.06043 1.06364 48.2 1.05717 1.06046 1.06363 48.6 1.05720 1.06049 1.06365 49.0 1.05725 1.06052 1.06363 49.4 1.05727 1.06052 1.06363 49.8 1.05733 1.06064 1.06359 50.2 1.05730 1.06090 1.06364 50.6 1.05734 1.06102 1.06361 51.0 1.05730 1.06111 1.06359 51.4 1.05731 1.06117 1.06356 51.8 1.05735 1.06112 1.06351 52.2 1.05758 1.06101 1.06349 52.6 1.05761 1.06099 1.06345 53.0 1.05756 1.06093 1.06346 53.4 1.05764 1.06087 1.06345 53.8 1.05762 1.06076 1.06345 54.2 1.05767 1.06074 1.06346 54.6 1.05762 1.06071 1.06345 55.0 1.05764 1.06070 1.06342 55.4 1.05761 1.06065 1.06342 
55.8 1.05761 1.06069 1.06335 56.2 1.05736 1.06073 1.06332 56.6 1.05728 1.06083 1.06330 57.0 1.05725 1.06085 1.06329 57.4 1.05716 1.06097 1.06323 57.8 1.05707 1.06107 1.06315 58.2 1.05696 1.06110 1.06312 58.6 1.05692 1.06112 1.06301 59.0 1.05684 1.06110 1.06288 59.4 1.05683 1.06112 1.06281 59.8 1.05675 1.06115 1.06284 60.2 1.05669 1.06118 1.06275 60.6 1.05666 1.06103 1.06277 61.0 1.05667 1.06103 1.06281 61.4 1.05669 1.06096 1.06287 61.8 1.05670 1.06087 1.06300 62.2 1.05669 1.06071 1.06306 62.6 1.05671 1.06068 1.06320 63.0 1.05674 1.06068 1.06341 63.4 1.05673 1.06065 1.06350 63.8 1.05678 1.06063 1.06354 64.2 1.05680 1.06061 1.06365 64.6 1.05679 1.06066 1.06364 65.0 1.05676 1.06072 1.06360 65.4 1.05673 1.06071 1.06357 65.8 1.05675 1.06077 1.06357 66.2 1.05671 1.06088 1.06353 66.6 1.05663 1.06092 1.06351 67.0 1.05660 1.06095 1.06347 67.4 1.05660 1.06101 1.06348 67.8 1.05655 1.06100 1.06344 68.2 1.05653 1.06101 1.06339 68.6 1.05651 1.06097 1.06348 69.0 1.05645 1.06099 1.06350 69.4 1.05639 1.06105 1.06350 69.8 1.05630 1.06105 1.06342 70.2 1.05629 1.06105 1.06339 70.6 1.05637 1.06107 1.06335 71.0 1.05641 1.06107 1.06330 71.4 1.05641 1.06104 1.06330 71.8 1.05635 1.06091 1.06331 72.2 1.05621 1.06095 1.06334 72.6 1.05624 1.06107 1.06323 73.0 1.05627 1.06109 1.06320 73.4 1.05631 1.06117 1.06318 73.8 1.05632 1.06123 1.06315 74.2 1.05630 1.06135 1.06309 74.6 1.05624 1.06139 1.06310 75.0 1.05627 1.06144 1.06312 75.4 1.05622 1.06155 1.06310 75.8 1.05617 1.06177 1.06310 76.2 1.05627 1.06186 1.06314 76.6 1.05602 1.06182 1.06314 77.0 1.05574 1.06185 1.06305 77.4 1.05583 1.06181 1.06301 77.8 1.05590 1.06187 1.06300 78.2 1.05593 1.06190 1.06312 78.6 1.05595 1.06196 1.06316 79.0 1.05591 1.06200 1.06324 79.4 1.05592 1.06200 1.06332 79.8 1.05600 1.06196 1.06343 80.2 1.05608 1.06193 1.06337 80.6 1.05633 1.06197 1.06350 81.0 1.05671 1.06196 1.06362 81.4 1.05672 1.06200 1.06382 81.8 1.05671 1.06200 1.06399 82.2 1.05675 1.06203 1.06407 82.6 1.05677 1.06196 1.06412 83.0 1.05677 1.06189 
1.06412 83.4 1.05680 1.06180 1.06413 83.8 1.05682 1.06181 1.06411 84.2 1.05686 1.06177 1.06421 84.6 1.05692 1.06177 1.06417 85.0 1.05690 1.06170 1.06419 85.4 1.05687 1.06161 1.06412 85.8 1.05692 1.06152 1.06404 86.2 1.05689 1.06137 1.06394 86.6 1.05693 1.06143 1.06389 87.0 1.05699 1.06143 1.06384 87.4 1.05702 1.06142 1.06381 87.8 1.05709 1.06139 1.06377 88.2 1.05709 1.06141 1.06374 88.6 1.05710 1.06129 1.06368 89.0 1.05711 1.06133 1.06364 89.4 1.05711 1.06133 1.06361 89.8 1.05714 1.06133 1.06365 90.2 1.05723 1.06127 1.06371 90.6 1.05725 1.06109 1.06374 91.0 1.05726 1.06107 1.06374 91.4 1.05726 1.06106 1.06381 91.8 1.05724 1.06104 1.06387 92.2 1.05726 1.06090 1.06387 92.6 1.05726 1.06094 1.06400 93.0 1.05729 1.06087 1.06410 93.4 1.05740 1.06085 1.06422 93.8 1.05755 1.06082 1.06417 94.2 1.05733 1.06086 1.06420 94.6 1.05738 1.06100 1.06412 95.0 1.05738 1.06101 1.06415 95.4 1.05745 1.06108 1.06405 95.8 1.05756 1.06095 1.06398 96.2 1.05763 1.06102 1.06397 96.6 1.05768 1.06115 1.06381 97.0 1.05768 1.06116 1.06376 97.4 1.05767 1.06121 1.06358 97.8 1.05753 1.06129 1.06358 98.2 1.05780 1.06124 1.06343 98.6 1.05777 1.06123 1.06343 99.0 1.05785 1.06118 1.06335 99.4 1.05783 1.06101 1.06331 99.8 1.05776 1.06102 1.06326 100.2 1.05774 1.06092 1.06317 100.6 1.05772 1.06077 1.06318 101.0 1.05775 1.06070 1.06313 101.4 1.05767 1.06062 1.06312 101.8 1.05770 1.06055 1.06302 102.2 1.05767 1.06054 1.06300 102.6 1.05768 1.06043 1.06273 103.0 1.05763 1.06036 1.06254 103.4 1.05760 1.06039 1.06233 103.8 1.05768 1.06043 1.06238 104.2 1.05762 1.06044 1.06249 104.6 1.05760 1.06035 1.06256 105.0 1.05755 1.06032 1.06262 105.4 1.05756 1.06023 1.06270 105.8 1.05769 1.06019 1.06285 106.2 1.05765 1.06013 1.06300 106.6 1.05764 1.06009 1.06337 107.0 1.05766 1.06009 1.06362 107.4 1.05769 1.06008 1.06388 107.8 1.05765 1.06008 1.06389 108.2 1.05768 1.06011 1.06387 108.6 1.05768 1.06018 1.06390 109.0 1.05769 1.06023 1.06395 109.4 1.05766 1.06026 1.06403 109.8 1.05755 1.06027 1.06405 110.2 1.05757 1.06037 
1.06406 110.6 1.05759 1.06034 1.06408 111.0 1.05758 1.06037 1.06416 111.4 1.05760 1.06040 1.06421 111.8 1.05764 1.06043 1.06422 112.2 1.05760 1.06047 1.06426 112.6 1.05762 1.06051 1.06428 113.0 1.05765 1.06053 1.06427 113.4 1.05776 1.06063 1.06420 113.8 1.05772 1.06067 1.06399 114.2 1.05768 1.06065 1.06378 114.6 1.05765 1.06074 NaN 115.0 1.05761 1.06065 NaN 115.4 1.05759 1.06069 NaN 115.8 1.05760 1.06070 NaN 116.2 1.05754 1.06065 NaN 116.6 1.05755 1.06064 NaN 117.0 1.05749 1.06068 NaN 117.4 1.05740 1.06066 NaN 117.8 1.05741 1.06069 NaN 118.2 1.05737 1.06075 NaN 118.6 1.05731 1.06077 NaN 119.0 1.05729 1.06088 NaN 119.4 1.05727 1.06081 NaN 119.8 1.05724 1.06079 NaN 120.2 1.05727 1.06077 NaN 120.6 1.05721 1.06083 NaN 121.0 1.05713 NaN NaN 121.4 1.05707 NaN NaN 121.8 1.05707 NaN NaN 122.2 1.05708 NaN NaN 122.6 1.05711 NaN NaN 123.0 1.05707 NaN NaN 123.4 1.05701 NaN NaN 123.8 1.05695 NaN NaN 124.2 1.05696 NaN NaN 124.6 1.05695 NaN NaN 125.0 1.05700 NaN NaN 125.4 1.05702 NaN NaN 125.8 1.05694 NaN NaN
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-35-5ade938462a0> in <module> 252 if i._Position == 11803: 253 listDF_byPosition.append(i._AlignedData) --> 254 Average_byPosition=pd.concat(listDF_byPosition,axis=1) 255 Std_byPosition = Average_byPosition.std(axis=1, level=0) 256 Average_byPosition = Average_byPosition.mean(axis=1, level=0) ~\Anaconda3\lib\site-packages\pandas\core\reshape\concat.py in concat(objs, axis, join, join_axes, ignore_index, keys, levels, names, verify_integrity, sort, copy) 253 verify_integrity=verify_integrity, 254 copy=copy, --> 255 sort=sort, 256 ) 257 ~\Anaconda3\lib\site-packages\pandas\core\reshape\concat.py in __init__(self, objs, axis, join, join_axes, keys, levels, names, ignore_index, verify_integrity, copy, sort) 302 303 if len(objs) == 0: --> 304 raise ValueError("No objects to concatenate") 305 306 if keys is None: ValueError: No objects to concatenate
# --- 2-component model: align each stimulated recording on its 5HT peak,
# cache the aligned trace on the object, and plot by electrode position ---
color_dict = {'11803':'r','11950':'g','12150':'b','12350':'m'}
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 2:
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)) if l == m]
                # fixed: extract the scalar index BEFORE the clamp; previously
                # `time_array_offset = time_array_offset[0]` ran after the
                # clamp could have replaced the list with int 230, raising
                # "TypeError: 'int' object is not subscriptable".
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample).
                time_array = (time_array - time_array_offset+20)*1*400/1000
                MA_Data = moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)
                # Cache the peak-aligned trace for the averaging cells below.
                i._AlignedData = pd.DataFrame(MA_Data, time_array)
                plt.plot(time_array, MA_Data, label=i._BrainRegion+' '+str(i._Position-9000), color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Serotonin, 2 Component PLSR', fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# 2-component model: plot the per-position mean of the aligned traces.
# Gather matching recordings with a comprehension, average, plot.
listDF_byPosition = [i._AlignedData for i in objs
                     if i._NumComp == 2 and i._Position == 11803]
Average_byPosition = pd.concat(listDF_byPosition, axis=1)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.plot(Average_byPosition, label='11803')
listDF_byPosition = [i._AlignedData for i in objs
                     if i._NumComp == 2 and i._Position == 11950]
Average_byPosition = pd.concat(listDF_byPosition, axis=1)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.plot(Average_byPosition, label='11950')
plt.legend()
# Position 12150 additionally requires a stimulated recording.
listDF_byPosition = [i._AlignedData for i in objs
                     if i._NumComp == 2 and i._StimType != 'None' and i._Position == 12150]
Average_byPosition = pd.concat(listDF_byPosition, axis=1)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.plot(Average_byPosition, label='12150')
plt.legend()
listDF_byPosition = [i._AlignedData for i in objs
                     if i._NumComp == 2 and i._Position == 12350]
Average_byPosition = pd.concat(listDF_byPosition, axis=1)
Average_byPosition = Average_byPosition.mean(axis=1, level=0)
plt.plot(Average_byPosition, label='12350')
plt.legend()
plt.show()
# --- Phasic 5HT (baseline-subtracted) per recording, colored by position ---
color_dict = {'11803':'r','11950':'g','12150':'b','12350':'m'}
listofDFs_PhasicHT1 = []
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 2:
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)) if l == m]
                # fixed: take the first matching index as a scalar up front; a
                # tied maximum previously left a multi-element list that broke
                # the broadcast below.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample), plus 11 s shift.
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                # Phasic = smoothed trace minus the mean of the first 101
                # (pre-stimulation) samples, rounded to 3 decimals.
                listofDFs_PhasicHT1.append(pd.DataFrame((moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted 5HT (uM)'].iloc[:101]).to_numpy()), 3)), time_array))
                plt.plot(time_array, (moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted 5HT (uM)'].iloc[:101]).to_numpy()), 3)), label=i._BrainRegion+' '+str(i._Position-9000), color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Serotonin Phasic, 2 Component PLSR', fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- Phasic DA (baseline-subtracted) per recording, colored by position ---
listofDFs_PhasicDA1 = []
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 2:
                m = max(moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10)) if l == m]
                # fixed: take the first matching index as a scalar up front; a
                # tied maximum previously left a multi-element list that broke
                # the broadcast below.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample), plus 11 s shift.
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                # Phasic = smoothed trace minus pre-stimulation baseline mean.
                listofDFs_PhasicDA1.append(pd.DataFrame((moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted DA (uM)'].iloc[:101]).to_numpy()), 3)), time_array))
                plt.plot(time_array, (moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted DA (uM)'].iloc[:101]).to_numpy()), 3)), label=i._BrainRegion+' '+str(i._Position-9000), color=color_dict[str(i._Position)])
plt.title('Predicted Striatal Dopamine Phasic, 2 Component PLSR', fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- Phasic 5HT per recording, colored by drug condition (pre/post SSRI) ---
color_dict = {'pre-SSRI':'r','post-SSRI':'g'}
listofDFs_PhasicHT2 = []
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 2:
                m = max(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)) if l == m]
                # fixed: take the first matching index as a scalar up front; a
                # tied maximum previously left a multi-element list that broke
                # the broadcast below.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample), plus 11 s shift.
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                # Phasic = smoothed trace minus pre-stimulation baseline mean.
                listofDFs_PhasicHT2.append(pd.DataFrame((moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted 5HT (uM)'].iloc[:101]).to_numpy()), 3)), time_array))
                plt.plot(time_array, (moving_average(i._Data['Predicted 5HT (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted 5HT (uM)'].iloc[:101]).to_numpy()), 3)), label=i._Drug + str(i._StimFreq), color=color_dict[str(i._Drug)])
plt.title('Predicted Striatal Serotonin Phasic, 2 Component PLSR', fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- Phasic DA per recording, colored by drug condition (pre/post SSRI) ---
listofDFs_PhasicDA2 = []
for j in stimulationFiles:
    for i in objs:
        if i._StimType != 'None':
            if str(i._SampleName) == 'Obj'+j[1:] and i._NumComp == 2:
                m = max(moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10))
                time_array_offset = [k for k, l in enumerate(moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10)) if l == m]
                # fixed: take the first matching index as a scalar up front; a
                # tied maximum previously left a multi-element list that broke
                # the broadcast below.
                time_array_offset = time_array_offset[0]
                if time_array_offset >= 300:
                    time_array_offset = 230
                time_array = np.arange(0, 492)
                # Sample index -> seconds (400 ms per sample), plus 11 s shift.
                time_array = (time_array - time_array_offset+20)*1*400/1000+11
                # Phasic = smoothed trace minus pre-stimulation baseline mean.
                listofDFs_PhasicDA2.append(pd.DataFrame((moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted DA (uM)'].iloc[:101]).to_numpy()), 3)), time_array))
                plt.plot(time_array, (moving_average(i._Data['Predicted DA (uM)'].to_numpy(), 10)-np.round(np.mean((i._Data['Predicted DA (uM)'].iloc[:101]).to_numpy()), 3)), label=i._Drug + str(i._StimFreq), color=color_dict[str(i._Drug)])
plt.title('Predicted Striatal Dopamine Phasic, 2 Component PLSR', fontsize=18)
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- 2-component model: average baseline-subtracted traces per stimulation
# frequency, with +/- 1 std bands ---
# 40 Hz recordings (baseline = mean of the first 51 aligned samples).
listDF_byFreq = []
for i in objs:
    if i._NumComp == 2:
        if i._StimFreq == 40:
            listDF_byFreq.append(i._AlignedData-i._AlignedData[:51].mean(axis=1, level=0)[0])
Average_byFreq = pd.concat(listDF_byFreq, axis=1)
Std_byFreq = Average_byFreq.std(axis=1, level=0)
Average_byFreq = Average_byFreq.mean(axis=1, level=0)
plt.fill_between(list(Average_byFreq.index.values), Average_byFreq[0]-Std_byFreq[0], Average_byFreq[0]+Std_byFreq[0], color='b', alpha=0.1)
plt.plot(Average_byFreq, label='40 Hz, N=2', color='b')
# 30 Hz recordings (stimulated only).
listDF_byFreq = []
for i in objs:
    if i._NumComp == 2:
        if i._StimFreq == 30:
            if i._StimType != 'None':
                listDF_byFreq.append(i._AlignedData-i._AlignedData[:51].mean(axis=1, level=0)[0])
Average_byFreq = pd.concat(listDF_byFreq, axis=1)
Std_byFreq = Average_byFreq.std(axis=1, level=0)
Average_byFreq = Average_byFreq.mean(axis=1, level=0)
plt.fill_between(list(Average_byFreq.index.values), Average_byFreq[0]-Std_byFreq[0], Average_byFreq[0]+Std_byFreq[0], color='r', alpha=0.1)
plt.plot(Average_byFreq, label='30 Hz, N=2', color='r')
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
# fixed: title said "3 Component" but both filters above select _NumComp==2
plt.title('Predicted Average Striatal Serotonin, 2 Component PLSR', fontsize=18)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- Aggregate phasic traces: overall mean +/- std for 5HT and DA ---
df_PhasicHT1 = pd.concat(listofDFs_PhasicHT1, axis=1)
df_PhasicHT1_Std = df_PhasicHT1.std(axis=1, level=0)
# Positional split into drug conditions; presumably columns 8-11 are the
# pre-SSRI recordings and 12-14 post-SSRI — verify against the file order.
df_PhasicHT1_only_preSSRI = df_PhasicHT1.iloc[:, 8:12]
df_PhasicHT1_only_preSSRI_Std = df_PhasicHT1_only_preSSRI.std(axis=1, level=0)
df_PhasicHT1_only_postSSRI = df_PhasicHT1.iloc[:, 12:15]
df_PhasicHT1_only_postSSRI_Std = df_PhasicHT1_only_postSSRI.std(axis=1, level=0)
df_PhasicDA1 = pd.concat(listofDFs_PhasicDA1, axis=1)
df_PhasicDA1_Std = df_PhasicDA1.std(axis=1, level=0)
df_PhasicDA1_only_preSSRI = df_PhasicDA1.iloc[:, 8:12]
df_PhasicDA1_only_preSSRI_Std = df_PhasicDA1_only_preSSRI.std(axis=1, level=0)
df_PhasicDA1_only_postSSRI = df_PhasicDA1.iloc[:, 12:15]
df_PhasicDA1_only_postSSRI_Std = df_PhasicDA1_only_postSSRI.std(axis=1, level=0)
plt.plot(df_PhasicHT1.mean(axis=1, level=0), label='HT (N=7)')
# NOTE(review): the HT band uses 554 x-points while the DA band uses 555 —
# confirm the intended trace length; one of these is likely off by one.
plt.fill_between(np.linspace(-97.2+11, 124.4+11, 554), (df_PhasicHT1.mean(axis=1, level=0)-df_PhasicHT1_Std)[0], (df_PhasicHT1.mean(axis=1, level=0)+df_PhasicHT1_Std)[0], color='b', alpha=0.1)
plt.plot(df_PhasicDA1.mean(axis=1, level=0), label='DA (N=7)')
plt.fill_between(np.linspace(-97.2+11, 124.4+11, 555), (df_PhasicDA1.mean(axis=1, level=0)-df_PhasicDA1_Std)[0], (df_PhasicDA1.mean(axis=1, level=0)+df_PhasicDA1_Std)[0], color='r', alpha=0.1)
plt.title("Average All Phasic Release, Pos 3150", fontsize=18)
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- Phasic 5HT: pre- vs post-SSRI mean traces with +/- 1 std bands ---
plt.plot(df_PhasicHT1_only_preSSRI.mean(axis=1, level=0), label='preSSRI (N=4)')
plt.fill_between(np.linspace(-97.2+11, 124.4+11, 554), (df_PhasicHT1_only_preSSRI.mean(axis=1, level=0)-df_PhasicHT1_only_preSSRI_Std)[0], (df_PhasicHT1_only_preSSRI.mean(axis=1, level=0)+df_PhasicHT1_only_preSSRI_Std)[0], color='b', alpha=0.1)
plt.plot(df_PhasicHT1_only_postSSRI.mean(axis=1, level=0), label='postSSRI (N=3)')
plt.fill_between(np.linspace(-97.2+11, 124.4+11, 554), (df_PhasicHT1_only_postSSRI.mean(axis=1, level=0)-df_PhasicHT1_only_postSSRI_Std)[0], (df_PhasicHT1_only_postSSRI.mean(axis=1, level=0)+df_PhasicHT1_only_postSSRI_Std)[0], color='r', alpha=0.1)
plt.title("Average 5HT Phasic Release, Pos 3150", fontsize=18)
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3)
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fixed: 'fontSize' is not a valid matplotlib Text property ('fontsize' is)
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# --- DA phasic release at Pos 3150: pre- vs post-SSRI, with +/-1 std bands ---
plt.plot(df_PhasicDA1_only_preSSRI.mean(axis=1, level=0), label='preSSRI (N=4)')
plt.fill_between(np.linspace(-97.2+11, 124.4+11, 555),
                 (df_PhasicDA1_only_preSSRI.mean(axis=1, level=0) - df_PhasicDA1_only_preSSRI_Std)[0],
                 (df_PhasicDA1_only_preSSRI.mean(axis=1, level=0) + df_PhasicDA1_only_preSSRI_Std)[0],
                 color='b', alpha=0.1)
plt.plot(df_PhasicDA1_only_postSSRI.mean(axis=1, level=0), label='postSSRI (N=3)')
plt.fill_between(np.linspace(-97.2+11, 124.4+11, 555),
                 (df_PhasicDA1_only_postSSRI.mean(axis=1, level=0) - df_PhasicDA1_only_postSSRI_Std)[0],
                 (df_PhasicDA1_only_postSSRI.mean(axis=1, level=0) + df_PhasicDA1_only_postSSRI_Std)[0],
                 color='r', alpha=0.1)
plt.title("Average DA Phasic Release, Pos 3150", fontsize=18)
plt.legend()
plt.axvspan(0, 20, color='gold', alpha=0.3)  # highlight the 0-20 s stimulation window
plt.xlabel('Time Since Stimulation Start (s)', fontsize=15)
plt.xticks(fontsize=12)
# fix: original passed fontSize= (capital S), which is not a valid Text property
plt.ylabel('Phasic Concentration, uM', fontsize=15)
plt.yticks(fontsize=12)
fig = plt.gcf()
fig.set_size_inches(9, 5)
# removes duplicates from legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
# Assemble the phasic-release dataframes (one column set per trial) and split
# them into pre-/post-SSRI subsets; per-group stds are taken over column level 0.
# NOTE(review): the first 12 columns are treated as pre-SSRI — confirm this
# matches the trial ordering produced upstream.
df_PhasicHT1 = pd.concat(listofDFs_PhasicHT1, axis=1)
df_PhasicHT1_only_preSSRI = df_PhasicHT1.iloc[:, :12]
df_PhasicHT1_only_postSSRI = df_PhasicHT1.iloc[:, 12:]
df_PhasicHT1_Std = df_PhasicHT1.std(axis=1, level=0)
df_PhasicHT1_only_preSSRI_Std = df_PhasicHT1_only_preSSRI.std(axis=1, level=0)
df_PhasicHT1_only_postSSRI_Std = df_PhasicHT1_only_postSSRI.std(axis=1, level=0)

# Same construction for the DA traces.
df_PhasicDA1 = pd.concat(listofDFs_PhasicDA1, axis=1)
df_PhasicDA1_only_preSSRI = df_PhasicDA1.iloc[:, :12]
df_PhasicDA1_only_postSSRI = df_PhasicDA1.iloc[:, 12:]
df_PhasicDA1_Std = df_PhasicDA1.std(axis=1, level=0)
df_PhasicDA1_only_preSSRI_Std = df_PhasicDA1_only_preSSRI.std(axis=1, level=0)
df_PhasicDA1_only_postSSRI_Std = df_PhasicDA1_only_postSSRI.std(axis=1, level=0)
# Quick overlay of mean 5-HT vs DA phasic release (all trials), +/-1 std bands.
ht_mean = df_PhasicHT1.mean(axis=1, level=0)
da_mean = df_PhasicDA1.mean(axis=1, level=0)
t_ht = np.linspace(-97.2 + 11, 124.4 + 11, 554)
t_da = np.linspace(-97.2 + 11, 124.4 + 11, 555)
plt.plot(ht_mean, label='HT (N=18)')
plt.fill_between(t_ht, (ht_mean - df_PhasicHT1_Std)[0], (ht_mean + df_PhasicHT1_Std)[0],
                 color='b', alpha=0.1)
plt.plot(da_mean, label='DA (N=18)')
plt.fill_between(t_da, (da_mean - df_PhasicDA1_Std)[0], (da_mean + df_PhasicDA1_Std)[0],
                 color='r', alpha=0.1)
plt.title("Average All Phasic Release")
plt.legend()
plt.show()
# Quick overlay of 5-HT phasic release, pre- vs post-SSRI, +/-1 std bands.
pre_mean = df_PhasicHT1_only_preSSRI.mean(axis=1, level=0)
post_mean = df_PhasicHT1_only_postSSRI.mean(axis=1, level=0)
t_axis = np.linspace(-97.2 + 11, 124.4 + 11, 554)
plt.plot(pre_mean, label='preSSRI (N=12)')
plt.fill_between(t_axis,
                 (pre_mean - df_PhasicHT1_only_preSSRI_Std)[0],
                 (pre_mean + df_PhasicHT1_only_preSSRI_Std)[0],
                 color='b', alpha=0.1)
plt.plot(post_mean, label='postSSRI (N=6)')
plt.fill_between(t_axis,
                 (post_mean - df_PhasicHT1_only_postSSRI_Std)[0],
                 (post_mean + df_PhasicHT1_only_postSSRI_Std)[0],
                 color='r', alpha=0.1)
plt.title("Average 5HT Phasic Release")
plt.legend()
plt.show()
# Quick overlay of DA phasic release, pre- vs post-SSRI, +/-1 std bands.
pre_mean_da = df_PhasicDA1_only_preSSRI.mean(axis=1, level=0)
post_mean_da = df_PhasicDA1_only_postSSRI.mean(axis=1, level=0)
t_axis_da = np.linspace(-97.2 + 11, 124.4 + 11, 555)
plt.plot(pre_mean_da, label='preSSRI (N=12)')
plt.fill_between(t_axis_da,
                 (pre_mean_da - df_PhasicDA1_only_preSSRI_Std)[0],
                 (pre_mean_da + df_PhasicDA1_only_preSSRI_Std)[0],
                 color='b', alpha=0.1)
plt.plot(post_mean_da, label='postSSRI (N=6)')
plt.fill_between(t_axis_da,
                 (post_mean_da - df_PhasicDA1_only_postSSRI_Std)[0],
                 (post_mean_da + df_PhasicDA1_only_postSSRI_Std)[0],
                 color='r', alpha=0.1)
plt.title("Average DA Phasic Release")
plt.legend()
plt.show()
for i in objs:
if i._NumComp == 2:
if i._StimFreq == 40:
--
if i_StimFreq == 30: